Trying to run the off-the-shelf Hello World example in the Canopy editor, I got the error below:
In [1]: %run /home/smarkov/Enthought/Canopy_64bit/User/Examples/enaml-0.2.0/hello_world/hello_world.py
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
/home/smarkov/Canopy/appdata/canopy-1.4.0.1938.rh5-x86_64/lib/python2.7/site-packages/IPython/utils/py3compat.pyc in execfile(fname, *where)
202 else:
203 filename = fname
--> 204 __builtin__.execfile(filename, *where)
/home/smarkov/Enthought/Canopy_64bit/User/Examples/enaml-0.2.0/hello_world/hello_world.py in <module>()
6
7 with enaml.imports():
----> 8 from hello_world_view import MyMessageToTheWorld
9
10 view = MyMessageToTheWorld(message="Hello, world!")
/run/media/smarkov/Data/enaml-0.6.8/enaml/core/import_hooks.py in load_module(self, fullname)
129 # module code of an Enaml file.
130 with imports():
--> 131 exec code in mod.__dict__
132 return mod
133
/home/smarkov/Enthought/Canopy_64bit/User/Examples/enaml-0.2.0/hello_world/hello_world_view.enaml in ()
3 # All rights reserved.
4 #------------------------------------------------------------------------------
----> 5 enamldef MyMessageToTheWorld(MainWindow):
6 attr message
7 Container:
NameError: name 'MainWindow' is not defined
My code is below:
with open('models/shap_explainer.pkl', 'rb') as f:
explainer = pickle.load(f)
with open('models/adult_model.pkl', 'rb') as f:
adult_model = pickle.load(f)
The error looks like this:
AttributeError Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_3264\2761765422.py in <module>
16 shap_values = pickle.load(f)
17 with open('models/shap_explainer.pkl', 'rb') as f:
---> 18 explainer = pickle.load(f)
19 with open('models/adult_model.pkl', 'rb') as f:
20 adult_model = pickle.load(f)
D:\Software\Anaconda3\envs\iAI\lib\site-packages\numba\serialize.py in <module>
18 from types import FunctionType, ModuleType
19
---> 20 from . import bytecode, compiler
21
22
D:\Software\Anaconda3\envs\iAI\lib\site-packages\numba\compiler.py in <module>
9 from numba import (bytecode, interpreter, postproc, typing, utils, config,
10 errors,)
---> 11 from numba.targets import cpu, callconv
12 from numba.parfor import ParforDiagnostics
13 from numba.inline_closurecall import InlineClosureCallPass
D:\Software\Anaconda3\envs\iAI\lib\site-packages\numba\targets\cpu.py in <module>
9 from numba import _dynfunc, config
10 from numba.callwrapper import PyCallWrapper
---> 11 from .base import BaseContext, PYOBJECT
12 from numba import utils, cgutils, types
13 from numba.utils import cached_property
D:\Software\Anaconda3\envs\iAI\lib\site-packages\numba\targets\base.py in <module>
23 builtin_registry, impl_ret_borrowed,
24 RegistryLoader)
---> 25 from numba import datamodel
26
27 GENERIC_POINTER = Type.pointer(Type.int(8))
D:\Software\Anaconda3\envs\iAI\lib\site-packages\numba\datamodel\__init__.py in <module>
2 from .packer import ArgPacker, DataPacker
3 from .registry import register_default, default_manager, register
----> 4 from .models import PrimitiveModel, CompositeModel, StructModel
D:\Software\Anaconda3\envs\iAI\lib\site-packages\numba\datamodel\models.py in <module>
304 #register_default(types.ExceptionInstance)
305 #register_default(types.ExternalFunction)
--> 306 #register_default(types.Macro)
307 #register_default(types.EnumClass)
308 #register_default(types.IntEnumClass)
AttributeError: module 'numba.types' has no attribute 'Macro'
I have no idea what is causing this; I have searched for a solution but found nothing about it.
Please help me load the two .pkl files above successfully.
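Not a definitive answer, but this particular AttributeError during unpickling often indicates that the installed numba/shap versions differ from the ones that wrote the .pkl files; that diagnosis is an assumption on my part, not something the traceback proves. A minimal version check:
import numba
import shap
# Compare these against the versions that were installed when the explainer
# was pickled. If they differ, re-creating the explainer under the installed
# versions (or pinning the original versions in the environment) is usually
# safer than trying to patch the pickle itself.
print("numba", numba.__version__)
print("shap", shap.__version__)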
After updating to spaCy 3.0.6 I haven't been able to load either of the trained pipelines, although both seem to be properly installed:
================= Installed pipeline packages (spaCy v3.0.6) =================
ℹ spaCy installation:
/Users/baconbaker/anaconda3/envs/ml/lib/python3.8/site-packages/spacy
NAME SPACY VERSION
en_core_web_sm >=3.0.0,<3.1.0 3.0.0 ✔
en_core_web_trf >=3.0.0,<3.1.0 3.0.0 ✔
This occurs when using spacy.load() and when importing the pipelines as a module (the error is identical for all of the following lines):
nlp = spacy.load("en_core_web_trf")
nlp = spacy.load("en_core_web_sm")
import en_core_web_sm
nlp = en_core_web_sm.load()
import en_core_web_trf
nlp = en_core_web_trf.load()
The error I'm getting is the following:
---------------------------------------------------------------------------
ImportError Traceback (most recent call last)
<ipython-input-9-b38eb3aae320> in <module>
1 import en_core_web_trf
----> 2 nlp = en_core_web_trf.load()
~/anaconda3/envs/ml/lib/python3.8/site-packages/en_core_web_trf/__init__.py in load(**overrides)
8
9 def load(**overrides):
---> 10 return load_model_from_init_py(__file__, **overrides)
~/anaconda3/envs/ml/lib/python3.8/site-packages/spacy/util.py in load_model_from_init_py(init_file, vocab, disable, exclude, config)
514 if not model_path.exists():
515 raise IOError(Errors.E052.format(path=data_path))
--> 516 return load_model_from_path(
517 data_path,
518 vocab=vocab,
~/anaconda3/envs/ml/lib/python3.8/site-packages/spacy/util.py in load_model_from_path(model_path, meta, vocab, disable, exclude, config)
389 config_path = model_path / "config.cfg"
390 config = load_config(config_path, overrides=dict_to_dot(config))
--> 391 nlp = load_model_from_config(config, vocab=vocab, disable=disable, exclude=exclude)
392 return nlp.from_disk(model_path, exclude=exclude)
393
~/anaconda3/envs/ml/lib/python3.8/site-packages/spacy/util.py in load_model_from_config(config, vocab, disable, exclude, auto_fill, validate)
426 # registry, including custom subclasses provided via entry points
427 lang_cls = get_lang_class(nlp_config["lang"])
--> 428 nlp = lang_cls.from_config(
429 config,
430 vocab=vocab,
~/anaconda3/envs/ml/lib/python3.8/site-packages/spacy/language.py in from_config(cls, config, vocab, disable, exclude, meta, auto_fill, validate)
1637 # then we would load them twice at runtime: once when we make from config,
1638 # and then again when we load from disk.
-> 1639 nlp = lang_cls(vocab=vocab, create_tokenizer=create_tokenizer, meta=meta)
1640 if after_creation is not None:
1641 nlp = after_creation(nlp)
~/anaconda3/envs/ml/lib/python3.8/site-packages/spacy/language.py in __init__(self, vocab, max_length, meta, create_tokenizer, batch_size, **kwargs)
148 # points. The factory decorator applied to these functions takes care
149 # of the rest.
--> 150 util.registry._entry_point_factories.get_all()
151
152 self._config = DEFAULT_CONFIG.merge(self.default_config)
~/anaconda3/envs/ml/lib/python3.8/site-packages/catalogue/__init__.py in get_all(self)
106 result = {}
107 if self.entry_points:
--> 108 result.update(self.get_entry_points())
109 for keys, value in REGISTRY.items():
110 if len(self.namespace) == len(keys) - 1 and all(
~/anaconda3/envs/ml/lib/python3.8/site-packages/catalogue/__init__.py in get_entry_points(self)
121 result = {}
122 for entry_point in AVAILABLE_ENTRY_POINTS.get(self.entry_point_namespace, []):
--> 123 result[entry_point.name] = entry_point.load()
124 return result
125
~/anaconda3/envs/ml/lib/python3.8/importlib/metadata.py in load(self)
75 """
76 match = self.pattern.match(self.value)
---> 77 module = import_module(match.group('module'))
78 attrs = filter(None, (match.group('attr') or '').split('.'))
79 return functools.reduce(getattr, attrs, module)
~/anaconda3/envs/ml/lib/python3.8/importlib/__init__.py in import_module(name, package)
125 break
126 level += 1
--> 127 return _bootstrap._gcd_import(name[level:], package, level)
128
129
~/anaconda3/envs/ml/lib/python3.8/importlib/_bootstrap.py in _gcd_import(name, package, level)
~/anaconda3/envs/ml/lib/python3.8/importlib/_bootstrap.py in _find_and_load(name, import_)
~/anaconda3/envs/ml/lib/python3.8/importlib/_bootstrap.py in _find_and_load_unlocked(name, import_)
~/anaconda3/envs/ml/lib/python3.8/importlib/_bootstrap.py in _call_with_frames_removed(f, *args, **kwds)
~/anaconda3/envs/ml/lib/python3.8/importlib/_bootstrap.py in _gcd_import(name, package, level)
~/anaconda3/envs/ml/lib/python3.8/importlib/_bootstrap.py in _find_and_load(name, import_)
~/anaconda3/envs/ml/lib/python3.8/importlib/_bootstrap.py in _find_and_load_unlocked(name, import_)
~/anaconda3/envs/ml/lib/python3.8/importlib/_bootstrap.py in _load_unlocked(spec)
~/anaconda3/envs/ml/lib/python3.8/importlib/_bootstrap_external.py in exec_module(self, module)
~/anaconda3/envs/ml/lib/python3.8/importlib/_bootstrap.py in _call_with_frames_removed(f, *args, **kwds)
~/anaconda3/envs/ml/lib/python3.8/site-packages/spacy_transformers/__init__.py in <module>
----> 1 from . import architectures
2 from . import annotation_setters
3 from . import span_getters
4 from .layers import TransformerModel
5 from .pipeline_component import Transformer, install_extensions
~/anaconda3/envs/ml/lib/python3.8/site-packages/spacy_transformers/architectures.py in <module>
3 from thinc.types import Ragged, Floats2d
4 from spacy.tokens import Doc
----> 5 from .layers import TransformerModel, TransformerListener
6 from .layers import trfs2arrays, split_trf_batch
7 from .util import registry
~/anaconda3/envs/ml/lib/python3.8/site-packages/spacy_transformers/layers/__init__.py in <module>
----> 1 from .listener import TransformerListener
2 from .transformer_model import TransformerModel
3 from .split_trf import split_trf_batch
4 from .trfs2arrays import trfs2arrays
5
~/anaconda3/envs/ml/lib/python3.8/site-packages/spacy_transformers/layers/listener.py in <module>
2 from thinc.api import Model
3 from spacy.tokens import Doc
----> 4 from ..data_classes import TransformerData
5
6
~/anaconda3/envs/ml/lib/python3.8/site-packages/spacy_transformers/data_classes.py in <module>
9 import srsly
10
---> 11 from .util import transpose_list
12 from .align import get_token_positions
13
~/anaconda3/envs/ml/lib/python3.8/site-packages/spacy_transformers/util.py in <module>
2 from pathlib import Path
3 import random
----> 4 from transformers import AutoModel, AutoTokenizer
5 from transformers.tokenization_utils import BatchEncoding
6 from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
~/anaconda3/envs/ml/lib/python3.8/site-packages/transformers/__init__.py in <module>
624
625 # Trainer
--> 626 from .trainer import Trainer
627 from .trainer_pt_utils import torch_distributed_zero_first
628 else:
~/anaconda3/envs/ml/lib/python3.8/site-packages/transformers/trainer.py in <module>
67 TrainerState,
68 )
---> 69 from .trainer_pt_utils import (
70 DistributedTensorGatherer,
71 SequentialDistributedSampler,
~/anaconda3/envs/ml/lib/python3.8/site-packages/transformers/trainer_pt_utils.py in <module>
38 SAVE_STATE_WARNING = ""
39 else:
---> 40 from torch.optim.lr_scheduler import SAVE_STATE_WARNING
41
42 logger = logging.get_logger(__name__)
ImportError: cannot import name 'SAVE_STATE_WARNING' from 'torch.optim.lr_scheduler' (/Users/baconbaker/anaconda3/envs/ml/lib/python3.8/site-packages/torch/optim/lr_scheduler.py)
Reverting to torch 1.4.0 from the current stable release 1.8.1 solves the problem, but I don't want to do so.
Is there an alternative solution?
It looks like this is fixed in newer versions of transformers (https://github.com/huggingface/transformers/pull/8979). Try upgrading both transformers and spacy-transformers.
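For example, assuming a standard pip-based environment, a quick way to confirm the environment is consistent after upgrading (the exact minimum versions needed are not stated in this thread, so treat this as a sketch):
import spacy
import transformers
# After upgrading transformers and spacy-transformers, the trf pipeline should
# load without the SAVE_STATE_WARNING ImportError.
print("spacy", spacy.__version__)
print("transformers", transformers.__version__)
nlp = spacy.load("en_core_web_trf")
print(nlp.pipe_names)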
Perl is giving me an undef value when I access a variable that is supposed to be defined in the %ENV hash. How is this possible?
root@23cd5f45def7:~/bin$ perl -e 'warn $ENV{SHELL}'
Warning: something's wrong at -e line 1.
I would expect perl to output /bin/bash instead.
More info on the environment:
root@23cd5f45def7:~/bin$ echo $SHELL
/bin/bash
root@23cd5f45def7:~/bin$ $SHELL --version
GNU bash, version 4.2.37(1)-release (x86_64-pc-linux-gnu)
Copyright (C) 2011 Free Software Foundation, Inc.
...
root@23cd5f45def7:~/bin$ perl -v
This is perl 5, version 14, subversion 2 (v5.14.2) built for x86_64-linux-gnu-thread-multi
(with 91 registered patches, see perl -V for more detail)
...
I am running this on Debian wheezy in a docker container.
The image was created with
sudo debootstrap wheezy ../_build http://ftp.us.debian.org/debian
sudo tar -C ../_build -c . | docker import - wheezy/bootstrap
I get the same behaviour with perl 5.24.1 compiled manually from source.
The error message
Warning: something's wrong at -e line 1.
indicates that the environment variable $SHELL does not exist or is not exported.
You can list the exported variables using the export command. You can add SHELL to the exported variables using the command:
export SHELL
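The same distinction can be checked from Python if that is handy: a child process only inherits variables that were actually exported, so a SHELL that was never exported comes back as missing there just as it comes back undef in Perl. This is only a cross-check, not part of the fix:
import os
# Returns None when SHELL was never exported into the environment,
# which is exactly the case where Perl's $ENV{SHELL} is undef.
print(os.environ.get('SHELL'))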
Something is wrong with your installation. It looks like the string
Warning: something's wrong at -e line 1.
would only ever be produced by the following code on line 461 in pp_sys.c (that is, when the argument to warn is undefined). Therefore, I am going to deduce that something sanitizes the environment before perl is invoked. You might also want to examine root's .profile, .bashrc, .bash_profile and other possibly relevant configuration files.
421 PP(pp_warn)
422 {
423 dSP; dMARK;
424 SV *exsv;
425 STRLEN len;
426 if (SP - MARK > 1) {
427 dTARGET;
428 do_join(TARG, &PL_sv_no, MARK, SP);
429 exsv = TARG;
430 SP = MARK + 1;
431 }
432 else if (SP == MARK) {
433 exsv = &PL_sv_no;
434 EXTEND(SP, 1);
435 SP = MARK + 1;
436 }
437 else {
438 exsv = TOPs;
439 if (SvGMAGICAL(exsv)) exsv = sv_mortalcopy(exsv);
440 }
441
442 if (SvROK(exsv) || (SvPV_const(exsv, len), len)) {
443 /* well-formed exception supplied */
444 }
445 else {
446 SV * const errsv = ERRSV;
447 SvGETMAGIC(errsv);
448 if (SvROK(errsv)) {
449 if (SvGMAGICAL(errsv)) {
450 exsv = sv_newmortal();
451 sv_setsv_nomg(exsv, errsv);
452 }
453 else exsv = errsv;
454 }
455 else if (SvPOKp(errsv) ? SvCUR(errsv) : SvNIOKp(errsv)) {
456 exsv = sv_newmortal();
457 sv_setsv_nomg(exsv, errsv);
458 sv_catpvs(exsv, "\t...caught");
459 }
460 else {
461 exsv = newSVpvs_flags("Warning: something's wrong", SVs_TEMP); ## <-- Here ...
462 }
463 }
464 if (SvROK(exsv) && !PL_warnhook)
465 Perl_warn(aTHX_ "%" SVf, SVfARG(exsv));
466 else warn_sv(exsv);
467 RETSETYES;
468 }
I was using an IPython notebook to run PySpark by just adding the following to the notebook:
import os
os.chdir('../data_files')
import sys
import pandas as pd
%pylab inline
from IPython.display import Image
os.environ['SPARK_HOME']="spark-1.3.1-bin-hadoop2.6"
sys.path.append( os.path.join(os.environ['SPARK_HOME'], 'python') )
sys.path.append( os.path.join(os.environ['SPARK_HOME'], 'bin') )
sys.path.append( os.path.join(os.environ['SPARK_HOME'], 'python/lib/py4j-0.8.2.1-src.zip') )
from pyspark import SparkContext
sc = SparkContext('local')
This worked fine for one project, but in my second project, after running a couple of lines (not the same ones every time), I get the following error:
ERROR:py4j.java_gateway:An error occurred while trying to connect to the Java server
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/py4j-0.8.2.1-py2.7.egg/py4j/java_gateway.py", line 425, in start
self.socket.connect((self.address, self.port))
File "/usr/lib/python2.7/socket.py", line 224, in meth
return getattr(self._sock,name)(*args)
error: [Errno 111] Connection refused
---------------------------------------------------------------------------
Py4JNetworkError Traceback (most recent call last)
<ipython-input-21-4626925bbe8f> in <module>()
----> 1 words.count()
/home/eee/Desktop/NLP/spark-1.3.1-bin-hadoop2.6/python/pyspark/rdd.pyc in count(self)
930 3
931 """
--> 932 return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
933
934 def stats(self):
/home/eee/Desktop/NLP/spark-1.3.1-bin-hadoop2.6/python/pyspark/rdd.pyc in sum(self)
921 6.0
922 """
--> 923 return self.mapPartitions(lambda x: [sum(x)]).reduce(operator.add)
924
925 def count(self):
/home/eee/Desktop/NLP/spark-1.3.1-bin-hadoop2.6/python/pyspark/rdd.pyc in reduce(self, f)
737 yield reduce(f, iterator, initial)
738
--> 739 vals = self.mapPartitions(func).collect()
740 if vals:
741 return reduce(f, vals)
/home/eee/Desktop/NLP/spark-1.3.1-bin-hadoop2.6/python/pyspark/rdd.pyc in collect(self)
710 Return a list that contains all of the elements in this RDD.
711 """
--> 712 with SCCallSiteSync(self.context) as css:
713 port = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
714 return list(_load_from_socket(port, self._jrdd_deserializer))
/home/eee/Desktop/NLP/spark-1.3.1-bin-hadoop2.6/python/pyspark/traceback_utils.pyc in __enter__(self)
70 def __enter__(self):
71 if SCCallSiteSync._spark_stack_depth == 0:
---> 72 self._context._jsc.setCallSite(self._call_site)
73 SCCallSiteSync._spark_stack_depth += 1
74
/usr/local/lib/python2.7/dist-packages/py4j-0.8.2.1-py2.7.egg/py4j/java_gateway.pyc in __call__(self, *args)
534 END_COMMAND_PART
535
--> 536 answer = self.gateway_client.send_command(command)
537 return_value = get_return_value(answer, self.gateway_client,
538 self.target_id, self.name)
/usr/local/lib/python2.7/dist-packages/py4j-0.8.2.1-py2.7.egg/py4j/java_gateway.pyc in send_command(self, command, retry)
360 the Py4J protocol.
361 """
--> 362 connection = self._get_connection()
363 try:
364 response = connection.send_command(command)
/usr/local/lib/python2.7/dist-packages/py4j-0.8.2.1-py2.7.egg/py4j/java_gateway.pyc in _get_connection(self)
316 connection = self.deque.pop()
317 except Exception:
--> 318 connection = self._create_connection()
319 return connection
320
/usr/local/lib/python2.7/dist-packages/py4j-0.8.2.1-py2.7.egg/py4j/java_gateway.pyc in _create_connection(self)
323 connection = GatewayConnection(self.address, self.port,
324 self.auto_close, self.gateway_property)
--> 325 connection.start()
326 return connection
327
/usr/local/lib/python2.7/dist-packages/py4j-0.8.2.1-py2.7.egg/py4j/java_gateway.pyc in start(self)
430 'server'
431 logger.exception(msg)
--> 432 raise Py4JNetworkError(msg)
433
434 def close(self):
Py4JNetworkError: An error occurred while trying to connect to the Java server
Once this happens, other lines that worked before now raise the same error.
Any ideas?
Specifications:
pyspark 1.4.1
ipython 4.0.0
[OS X / Homebrew]
If you want to launch pyspark within a Jupyter (formerly IPython) Notebook using the IPython kernel, I advise you to launch your notebook directly with the pyspark command:
>>>pyspark
But in order to do that, you need to add three lines to your bash .profile or zsh .zshrc to set these environment variables:
export SPARK_HOME=/path/to/apache-spark/1.4.1/libexec
export PYSPARK_DRIVER_PYTHON=ipython2 # remember that Apache Spark only works with Python 2.7
export PYSPARK_DRIVER_PYTHON_OPTS='notebook'
In my case, given that I'm on OS X and installed apache-spark with Homebrew, this is:
export SPARK_HOME=/usr/local/Cellar/apache-spark/1.4.1/libexec
export PYSPARK_DRIVER_PYTHON=ipython2
export PYSPARK_DRIVER_PYTHON_OPTS='notebook'
Then, when you execute the pyspark command in your terminal, it will automatically open a Jupyter (formerly IPython) notebook in your default browser.
>>>pyspark
[I 17:51:00.209 NotebookApp] Serving notebooks from local directory: /Users/Thibault/code/kaggle
[I 17:51:00.209 NotebookApp] 0 active kernels
[I 17:51:00.210 NotebookApp] The IPython Notebook is running at: http://localhost:42424/
[I 17:51:00.210 NotebookApp] Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).
[I 17:51:11.980 NotebookApp] Kernel started: 53ad11b1-4fa4-459d-804c-0487036b0f29
15/09/02 17:51:15 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
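For reference, when the notebook is launched through the pyspark command like this, the driver already creates a SparkContext bound to the name sc, so there is no need to append to sys.path or build SparkContext('local') by hand. A minimal first-cell sanity check (a sketch, assuming the launcher created sc as usual):
# sc is provided by the pyspark startup script when the notebook is launched
# this way; creating a second SparkContext manually would raise an error.
rdd = sc.parallelize(range(100))
print(rdd.count())  # expect 100; a Py4JNetworkError here means the JVM gateway is down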
I recently updated to the latest version of Enthought Canopy (Python 2.7.x). I have some Python code that was working a month ago with an older version of Enthought Canopy. Now I am getting error messages related to tests and checking when I use "import". The exact same code works fine with Anaconda Spyder, so nothing has changed in my code. I am sure something changed in the newer version of Canopy, but the Enthought folks won't talk to me (I'm a free academic user). I am a "number cruncher" and not a Python expert at all. I would appreciate any help you can give me. I am using a MacBook Pro with OS X 10.8.5 and Canopy version 1.4.1 (64-bit).
Here is the import part of the code (same error with any "import" command):
import numba
Here are the error messages:
Welcome to Canopy's interactive data-analysis environment!
with pylab-backend set to: inline
Type '?' for more information.
In [1]: %run "/Users/kgilbert/Desktop/python2.7Nov2013/FourierPE/Fourier_PE _ORIGINAL2 copy.py"
---------------------------------------------------------------------------
UnsupportedOperation Traceback (most recent call last)
/Applications/Canopy.app/appdata/canopy-1.4.1.1975.macosx-x86_64/Canopy.app/Contents/lib/python2.7/site-packages/IPython/utils/py3compat.pyc in execfile(fname, *where)
202 else:
203 filename = fname
--> 204 __builtin__.execfile(filename, *where)
/Users/kgilbert/Desktop/python2.7Nov2013/FourierPE/Fourier_PE _ORIGINAL2 copy.py in <module>()
36 '''
37
---> 38 import numba
39 import FiniteDifferencePE_modules
40 import FourierPE_modules
/Users/kgilbert/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/numba/__init__.py in <module>()
4 from __future__ import print_function, division, absolute_import
5 import re
----> 6 from . import testing, decorators
7 from ._version import get_versions
8 from . import special, types, config
/Users/kgilbert/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/numba/testing.py in <module>()
5 import numba.unittest_support as unittest
6
----> 7 from numba.tests import NumbaTestProgram
8 from numba.utils import StringIO
9
/Users/kgilbert/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/numba/tests/__init__.py in <module>()
175 pass
176 else:
--> 177 faulthandler.enable()
/Applications/Canopy.app/appdata/canopy-1.4.1.1975.macosx-x86_64/Canopy.app/Contents/lib/python2.7/site-packages/IPython/kernel/zmq/iostream.pyc in fileno(self)
174
175 def fileno(self):
--> 176 raise UnsupportedOperation("IOStream has no fileno.")
177
178 def write(self, string):
UnsupportedOperation: IOStream has no fileno.
NEW OUTPUT BELOW
Canopy now has numba version 0.14.0-1. Here are the error messages:
%run "/Users/kgilbert/Desktop/python2.7Nov2013/FourierPE/Fourier_PE _ORIGINAL2 copy.py"
AttributeError Traceback (most recent call last)
/Applications/Canopy.app/appdata/canopy-1.4.1.1975.macosx-x86_64/Canopy.app/Contents/lib/python2.7/site-packages/IPython/utils/py3compat.pyc in execfile(fname, *where)
202 else:
203 filename = fname
--> 204 __builtin__.execfile(filename, *where)
/Users/kgilbert/Desktop/python2.7Nov2013/FourierPE/Fourier_PE _ORIGINAL2 copy.py in <module>()
36 '''
37
---> 38 import numba
39 import FiniteDifferencePE_modules
40 import FourierPE_modules
/Users/kgilbert/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/numba/__init__.py in <module>()
4 from __future__ import print_function, division, absolute_import
5 import re
----> 6 from . import testing, decorators
7 from ._version import get_versions
8 from . import special, types, config
/Users/kgilbert/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/numba/testing.py in <module>()
3 import contextlib
4
----> 5 import numba.unittest_support as unittest
6
7 from numba.tests import NumbaTestProgram
AttributeError: 'module' object has no attribute 'unittest_support'
QUESTION: WHAT HAPPENS WHEN YOU IMPORT NUMBA FROM IPYTHON
ANSWER: I GET THE FIRST ERROR MESSAGE ABOVE (GIVEN HERE BELOW)
Welcome to Canopy's interactive data-analysis environment!
with pylab-backend set to: inline
Type '?' for more information.
In [1]: import numba
UnsupportedOperation Traceback (most recent call last)
in ()
----> 1 import numba
/Users/kgilbert/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/numba/__init__.py in <module>()
4 from __future__ import print_function, division, absolute_import
5 import re
----> 6 from . import testing, decorators
7 from ._version import get_versions
8 from . import special, types, config
/Users/kgilbert/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/numba/testing.py in <module>()
5 import numba.unittest_support as unittest
6
----> 7 from numba.tests import NumbaTestProgram
8 from numba.utils import StringIO
9
/Users/kgilbert/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/numba/tests/__init__.py in <module>()
175 pass
176 else:
--> 177 faulthandler.enable()
/Applications/Canopy.app/appdata/canopy-1.4.1.1975.macosx-x86_64/Canopy.app/Contents/lib/python2.7/site-packages/IPython/kernel/zmq/iostream.pyc in fileno(self)
174
175 def fileno(self):
--> 176 raise UnsupportedOperation("IOStream has no fileno.")
177
178 def write(self, string):
UnsupportedOperation: IOStream has no fileno.
In [2]: