I was following a tutorial in the pytest documentation for creating temporary directories, https://docs.pytest.org/en/stable/tmpdir.html, and when I executed the following code, the contents of my entire Documents folder were deleted.
import pytest
import os


@pytest.fixture()
def tmdirmak(tmpdir_factory):
    f = tmpdir_factory.mktemp("data")
    return f


def test_temp(tmp_path):
    d = tmp_path / "sub1"
    d.mkdir()
    p = d / "tem.txt"
    p.write_text("hello")
    assert p.read_text() == "hello"
    os.system("ls")
This file was saved in my Documents folder, and that is where I ran the command pytest --basetemp=. -s test_paths.py, which according to the documentation is how you change where the temporary directory is made. I did not want the temporary directory to be made in /tmp; rather, I wanted it created in the same directory my code lives in. When I ran this I got the following pytest output:
test_paths.py E
================================================= ERRORS ==================================================
_______________________________________ ERROR at setup of test_temp _______________________________________
path = '/Users/matthewclark/Documents', ignore_errors = False
onerror = functools.partial(<function on_rm_rf_error at 0x7fe29a845dd0>, start_path=PosixPath('/Users/matthewclark/Documents'))
    def rmtree(path, ignore_errors=False, onerror=None):
        """Recursively delete a directory tree.
        If ignore_errors is set, errors are ignored; otherwise, if onerror
        is set, it is called to handle the error with arguments (func,
        path, exc_info) where func is platform and implementation dependent;
        path is the argument to that function that caused it to fail; and
        exc_info is a tuple returned by sys.exc_info(). If ignore_errors
        is false and onerror is None, an exception is raised.
        """
        if ignore_errors:
            def onerror(*args):
                pass
        elif onerror is None:
            def onerror(*args):
                raise
        if _use_fd_functions:
            # While the unsafe rmtree works fine on bytes, the fd based does not.
            if isinstance(path, bytes):
                path = os.fsdecode(path)
            # Note: To guard against symlink races, we use the standard
            # lstat()/open()/fstat() trick.
            try:
                orig_st = os.lstat(path)
            except Exception:
                onerror(os.lstat, path, sys.exc_info())
                return
            try:
                fd = os.open(path, os.O_RDONLY)
            except Exception:
                onerror(os.lstat, path, sys.exc_info())
                return
            try:
                if os.path.samestat(orig_st, os.fstat(fd)):
                    _rmtree_safe_fd(fd, path, onerror)
                    try:
>                       os.rmdir(path)
E                       PermissionError: [Errno 13] Permission denied: '/Users/matthewclark/Documents'
../anaconda3/lib/python3.7/shutil.py:496: PermissionError
During handling of the above exception, another exception occurred:
path = '/Users/matthewclark/Documents', ignore_errors = False
onerror = functools.partial(<function on_rm_rf_error at 0x7fe29a845dd0>, start_path=PosixPath('/Users/matthewclark/Documents'))
    def rmtree(path, ignore_errors=False, onerror=None):
        """Recursively delete a directory tree.
        If ignore_errors is set, errors are ignored; otherwise, if onerror
        is set, it is called to handle the error with arguments (func,
        path, exc_info) where func is platform and implementation dependent;
        path is the argument to that function that caused it to fail; and
        exc_info is a tuple returned by sys.exc_info(). If ignore_errors
        is false and onerror is None, an exception is raised.
        """
        if ignore_errors:
            def onerror(*args):
                pass
        elif onerror is None:
            def onerror(*args):
                raise
        if _use_fd_functions:
            # While the unsafe rmtree works fine on bytes, the fd based does not.
            if isinstance(path, bytes):
                path = os.fsdecode(path)
            # Note: To guard against symlink races, we use the standard
            # lstat()/open()/fstat() trick.
            try:
                orig_st = os.lstat(path)
            except Exception:
                onerror(os.lstat, path, sys.exc_info())
                return
            try:
                fd = os.open(path, os.O_RDONLY)
            except Exception:
                onerror(os.lstat, path, sys.exc_info())
                return
            try:
                if os.path.samestat(orig_st, os.fstat(fd)):
                    _rmtree_safe_fd(fd, path, onerror)
                    try:
                        os.rmdir(path)
                    except OSError:
>                       onerror(os.rmdir, path, sys.exc_info())
E                       PermissionError: [Errno 13] Permission denied: '/Users/matthewclark/Documents'
../anaconda3/lib/python3.7/shutil.py:498: PermissionError
============================================ 1 error in 0.21s =============================================
I then returned to find all the folders and documents in my Documents directory deleted! I suspect that since pytest normally thinks it is in /tmp, it just clears out all the stuff in that folder when it is done, including the things it didn't create. If this is true, this is a horrible bug. What's strange is that I received these permission errors, yet my contents were deleted anyway.
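pytest's documentation warns about exactly this: the directory given by --basetemp is cleared before the run, so it must be a directory dedicated to pytest's temporary files. As a defensive measure, a conftest.py hook can refuse to start when --basetemp points at an existing non-empty directory; here is a minimal sketch (the guard is illustrative, not a built-in pytest feature):

import os

import pytest


def pytest_configure(config):
    basetemp = config.option.basetemp  # None unless --basetemp was given
    if basetemp is None:
        return
    basetemp = os.path.abspath(str(basetemp))
    # pytest clears the --basetemp directory before running tests, so refuse
    # to start if the target already exists and contains anything.
    if os.path.isdir(basetemp) and os.listdir(basetemp):
        raise pytest.UsageError(
            "--basetemp points at an existing non-empty directory (%s); "
            "pytest would delete its contents" % basetemp)

With this in place, the command above would abort with a usage error instead of wiping the Documents folder.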
I was trying to build core-image-minimal for Jetson TX2, following the instructions from this link:
https://developer.ridgerun.com/wiki/index.php?title=Yocto_Support_for_NVIDIA_Jetson_Platforms-Old
My build configuration is
Build Configuration:
BB_VERSION = "1.46.0"
BUILD_SYS = "x86_64-linux"
NATIVELSBSTRING = "universal"
TARGET_SYS = "aarch64-poky-linux"
MACHINE = "jetson-tx2"
DISTRO = "poky"
DISTRO_VERSION = "3.1.5"
TUNE_FEATURES = "aarch64 armv8a crc"
TARGET_FPU = ""
meta-tegra = "dunfell-l4t-r32.4.3:3b4df1ac05e9f96e0363630c036f5445800af435"
meta
meta-poky
meta-yocto-bsp = "dunfell:6e89d668246fb37b2217aae7ae57390e793696d8"
But I got this error related to the tegra-minimal-initramfs recipe.
WARNING: tegra-minimal-initramfs-1.0-r0 do_image_complete: KeyError in .
ERROR: tegra-minimal-initramfs-1.0-r0 do_image_complete: Error executing a python function in exec_python_func() autogenerated:
The stack trace of python calls that resulted in this exception/failure was:
File: 'exec_python_func() autogenerated', lineno: 2, function: <module>
0001:
*** 0002:sstate_report_unihash(d)
0003:
File: '/home/pc_1175/yocto-tegra/poky-dunfell/meta/classes/sstate.bbclass', lineno: 844, function: sstate_report_unihash
0840: report_unihash = getattr(bb.parse.siggen, 'report_unihash', None)
0841:
0842: if report_unihash:
0843: ss = sstate_state_fromvars(d)
*** 0844: report_unihash(os.getcwd(), ss['task'], d)
0845:}
0846:
0847:#
0848:# Shell function to decompress and prepare a package for installation
File: '/home/pc_1175/yocto-tegra/poky-dunfell/bitbake/lib/bb/siggen.py', lineno: 555, function: report_unihash
0551:
0552: if "." in self.method:
0553: (module, method) = self.method.rsplit('.', 1)
0554: locs['method'] = getattr(importlib.import_module(module), method)
*** 0555: outhash = bb.utils.better_eval('method(path, sigfile, task, d)', locs)
0556: else:
0557: outhash = bb.utils.better_eval(self.method + '(path, sigfile, task, d)', locs)
0558:
0559: try:
File: '/home/pc_1175/yocto-tegra/poky-dunfell/bitbake/lib/bb/utils.py', lineno: 420, function: better_eval
0416: if extraglobals:
0417: ctx = copy.copy(ctx)
0418: for g in extraglobals:
0419: ctx[g] = extraglobals[g]
*** 0420: return eval(source, ctx, locals)
0421:
0422:@contextmanager
0423:def fileslocked(files):
0424: """Context manager for locking and unlocking file locks."""
File: '<string>', lineno: 1, function: <module>
File "<string>", line 1, in <module>
File: '/home/pc_1175/yocto-tegra/poky-dunfell/meta/lib/oe/sstatesig.py', lineno: 593, function: OEOuthashBasic
0589:
0590: update_hash("\n")
0591:
0592: # Process this directory and all its child files
*** 0593: process(root)
0594: for f in files:
0595: if f == 'fixmepath':
0596: continue
0597: process(os.path.join(root, f))
File: '/home/pc_1175/yocto-tegra/poky-dunfell/meta/lib/oe/sstatesig.py', lineno: 553, function: process
0549: add_perm(stat.S_IXOTH, 'x')
0550:
0551: if include_owners:
0552: try:
*** 0553: update_hash(" %10s" % pwd.getpwuid(s.st_uid).pw_name)
0554: update_hash(" %10s" % grp.getgrgid(s.st_gid).gr_name)
0555: except KeyError:
0556: bb.warn("KeyError in %s" % path)
0557: raise
Exception: KeyError: 'getpwuid(): uid not found: 1000'
ERROR: Logfile of failure stored in: /home/pc_1175/yocto-tegra/build/tmp/work/jetson_tx2-poky-linux/tegra-minimal-initramfs/1.0-r0/temp/log.do_image_complete.23961
ERROR: Task (/home/pc_1175/yocto-tegra/meta-tegra/recipes-core/images/tegra-minimal-initramfs.bb:do_image_complete) failed with exit code '1'
The problem is that I have built the same image before with the same packages and I didn't get this error.
When I added TEGRA_INITRAMFS_INITRD = "0" to local.conf file I didn't get this error but I'm wondering if it can affect my system.
TL;DR:
Just clean the image recipe's working directory with bitbake -c cleansstate tegra-minimal-initramfs, and also bitbake -c cleansstate core-image-minimal just in case. Then the build should work, but it may not be reproducible; that is, you may have to run these two cleansstate commands (for tegra-minimal-initramfs and core-image-minimal) before building the images every time.
I got the same issue when I migrated my project (not connected with Tegra) to Yocto 3.2. The issue was with pseudo, the fakeroot tool that Yocto uses to generate a rootfs with the right file ownership (you run bitbake as an ordinary user, not root, but get a rootfs with all files belonging to root). Here is the bug I've posted with my patch.
But you are using Yocto 3.1.5, as I see, so your issue is different. The core reason is that during some package's build (the one that was excluded by TEGRA_INITRAMFS_INITRD = "0") pseudo remembered that some file should belong to user 1000, but while building tegra-minimal-initramfs (the generation of the initramfs) user 1000 was not found in the initramfs itself, because it contains only root and a few basic Linux users.
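The KeyError itself comes from pwd.getpwuid(), which raises as soon as the uid is missing from whatever password database is visible at the time; under pseudo that database is the image's own passwd data rather than the host's /etc/passwd. A tiny illustration of the failure mode:

import pwd

try:
    # On a typical build host uid 1000 resolves to the first regular user,
    # so this prints a user name there. In an environment whose passwd data
    # lacks uid 1000 (such as the initramfs), the lookup raises instead.
    print(pwd.getpwuid(1000).pw_name)
except KeyError as exc:
    print(exc)  # KeyError: 'getpwuid(): uid not found: 1000'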
As for your question whether anything may break if you leave TEGRA_INITRAMFS_INITRD = "0": likely yes. Here is where this variable is applied. It is used during the U-Boot build and looks like it turns off initramfs usage altogether. So with TEGRA_INITRAMFS_INITRD = "0" your final image wouldn't include an initramfs file. If the device has some fallback mechanism to boot without an initramfs, it seems you are OK; if not, try cleansstate.
We are using py.test 2.8.7, and I have the method below, which creates a separate log file for every test case. However, it does not handle unhandled exceptions: if a code snippet throws an exception instead of failing with an assert, the exception's stack trace is not logged into the separate file. Can someone please help me with how I could capture these exceptions?
import logging
import os

import pytest


def remove_special_chars(input):
    """
    Replaces all special characters which ideally should not be included in the name of a file
    Such characters will be replaced with a dot so we know there was something useful there
    """
    for special_ch in ["/", "\\", "<", ">", "|", "&", ":", "*", "?", "\"", "'"]:
        input = input.replace(special_ch, ".")
    return input


def assemble_test_fqn(node):
    """
    Assembles a fully-qualified name for our test-case which will be used as its test log file name
    """
    current_node = node
    result = ""
    while current_node is not None:
        if current_node.name == "()":
            current_node = current_node.parent
            continue
        if result != "":
            result = "." + result
        result = current_node.name + result
        current_node = current_node.parent
    return remove_special_chars(result)


# This fixture creates a logger per test-case
@pytest.yield_fixture(scope="function", autouse=True)
def set_log_file_per_method(request):
    """
    Creates a separate file logging handler for each test method
    """
    # Assembling the location of the log folder
    test_log_dir = "%s/all_test_logs" % (request.config.getoption("--output-dir"))
    # Creating the log folder if it does not exist
    if not os.path.exists(test_log_dir):
        os.makedirs(test_log_dir)
    # Adding a file handler
    test_log_file = "%s/%s.log" % (test_log_dir, assemble_test_fqn(request.node))
    file_handler = logging.FileHandler(filename=test_log_file, mode="w")
    file_handler.setLevel("INFO")
    log_format = request.config.getoption("--log-format")
    log_formatter = logging.Formatter(log_format)
    file_handler.setFormatter(log_formatter)
    logging.getLogger('').addHandler(file_handler)
    yield
    # After the test finished, we remove the file handler
    file_handler.close()
    logging.getLogger('').removeHandler(file_handler)
I ended up with a custom plugin:
import io
import os

import pytest


def remove_special_chars(text):
    """
    Replaces all special characters which ideally should not be included in the name of a file
    Such characters will be replaced with a dot so we know there was something useful there
    """
    for special_ch in ["/", "\\", "<", ">", "|", "&", ":", "*", "?", "\"", "'"]:
        text = text.replace(special_ch, ".")
    return text


def assemble_test_fqn(node):
    """
    Assembles a fully-qualified name for our test-case which will be used as its test log file name
    The result will also include the potential path of the log file as the parents are appended to the fqn with a /
    """
    current_node = node
    result = ""
    while current_node is not None:
        if current_node.name == "()":
            current_node = current_node.parent
            continue
        if result != "":
            result = "/" + result
        result = remove_special_chars(current_node.name) + result
        current_node = current_node.parent
    return result


def as_unicode(text):
    """
    Encodes a text into unicode
    If it's already unicode, we do not touch it
    """
    if isinstance(text, unicode):
        return text
    else:
        return unicode(str(text))


class TestReport:
    """
    Holds a test-report
    """

    def __init__(self, fqn):
        self._fqn = fqn
        self._errors = []
        self._sections = []

    def add_error(self, error):
        """
        Adds an error (either an Exception or an assertion error) to the list of errors
        """
        self._errors.append(error)

    def add_sections(self, sections):
        """
        Adds captured sections to our internal list of sections
        Since tests can have multiple phases (setup, call, teardown) this will be invoked for all phases
        If for a newer phase we already captured a section, we override it in our already existing internal list
        """
        interim = []
        for current_section in self._sections:
            section_to_add = current_section
            # If the current section we already have is also present in the input parameter,
            # we override our existing section with the one from the input as that's newer
            for index, input_section in enumerate(sections):
                if current_section[0] == input_section[0]:
                    section_to_add = input_section
                    sections.pop(index)
                    break
            interim.append(section_to_add)
        # Adding the new sections from the input parameter to our internal list
        for input_section in sections:
            interim.append(input_section)
        # And finally overriding our internal list of sections
        self._sections = interim

    def save_to_file(self, log_folder):
        """
        Saves the current report to a log file
        """
        # Assembling the location of the log file
        test_log_file = "%s/%s.log" % (log_folder, self._fqn)
        # Creating the log folder if it does not exist
        if not os.path.exists(os.path.dirname(test_log_file)):
            os.makedirs(os.path.dirname(test_log_file))
        # Saving the report to the given log file
        with io.open(test_log_file, 'w', encoding='UTF-8') as f:
            for error in self._errors:
                f.write(as_unicode(error))
                f.write(u"\n\n")
            for index, section in enumerate(self._sections):
                f.write(as_unicode(section[0]))
                f.write(u":\n")
                f.write((u"=" * (len(section[0]) + 1)) + u"\n")
                f.write(as_unicode(section[1]))
                if index < len(self._sections) - 1:
                    f.write(u"\n")


class ReportGenerator:
    """
    A py.test plugin which collects the test-reports and saves them to a separate file per test
    """

    def __init__(self, output_dir):
        self._reports = {}
        self._output_dir = output_dir

    @pytest.hookimpl(tryfirst=True, hookwrapper=True)
    def pytest_runtest_makereport(self, item, call):
        outcome = yield
        # Generating the fully-qualified name of the underlying test
        fqn = assemble_test_fqn(item)
        # Getting the already existing report for the given test from our internal dict
        # or creating a new one if it's not already present
        # We need to do this as this method will be invoked for each phase (setup, call, teardown)
        if fqn not in self._reports:
            report = TestReport(fqn)
            self._reports.update({fqn: report})
        else:
            report = self._reports[fqn]
        result = outcome.result
        # Appending the sections for the current phase to the test-report
        report.add_sections(result.sections)
        # If we have an error, we add that as well to the test-report
        if hasattr(result, "longrepr") and result.longrepr is not None:
            error = result.longrepr
            error_text = ""
            if isinstance(error, str) or isinstance(error, unicode):
                error_text = as_unicode(error)
            elif isinstance(error, tuple):
                error_text = u"\n".join([as_unicode(e) for e in error])
            elif hasattr(error, "reprcrash") and hasattr(error, "reprtraceback"):
                if error.reprcrash is not None:
                    error_text += str(error.reprcrash)
                if error.reprtraceback is not None:
                    if error_text != "":
                        error_text += "\n\n"
                    error_text += str(error.reprtraceback)
            else:
                error_text = as_unicode(error)
            report.add_error(error_text)
        # Finally saving the report
        # We need to do this for all phases as we don't know if and when a test would fail
        # This will essentially override the previous log file for a test if we are in a newer phase
        report.save_to_file("%s/all_test_logs" % self._output_dir)


def pytest_configure(config):
    config._report_generator = ReportGenerator("result")
    config.pluginmanager.register(config._report_generator)
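For illustration, assuming the plugin above is loaded (for example from a conftest.py), a test that dies with an unhandled exception, which is the case the question asks about, gets its traceback captured through result.longrepr and written to result/all_test_logs/<test path>/<test name>.log. A trivial test to verify that:

# test_sample.py -- a hypothetical test used only to exercise the plugin above
def test_unhandled_exception():
    raise RuntimeError("boom")  # no assert; the traceback still lands in the log file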
I have the following function I wish to test:
def download(self):
    s3 = boto3.client('s3')
    try:
        with open(self.flow_cells + '.tar', 'wb') as data:
            s3.download_fileobj(
                self.source_s3_bucket,
                self.source_key,
                data
            )
        return True
    except botocore.exceptions.ClientError as error:
        print(error.response['Error']['Code'])
I am using pytest with moto to test this code. All other tests and botocore exceptions are flagged correctly except for this one. I can see in standard output that it reaches the except block and prints the correct error code, but moto is not flagging it as an exception.
Here is my testing code.
def test_download(parse_args, file_test):
    with moto.mock_s3():
        s3 = boto3.resource('s3')
        s3.create_bucket(Bucket=parse_args.glacier_s3_bucket, CreateBucketConfiguration={
            'LocationConstraint': 'us-east-1'
        })
        s3.create_bucket(Bucket=parse_args.output_s3_bucket, CreateBucketConfiguration={
            'LocationConstraint': 'us-east-1'
        })
        bucket_version = s3.BucketVersioning(parse_args.glacier_s3_bucket)
        bucket_version.enable()
        s3.Object(parse_args.glacier_s3_bucket, 'flowcells/flowcell-testing.tar').put(
            Body=open(file_test, 'rb'))
        glacier_client = GlacierRestoreClient(parse_args)
        assert glacier_client.download() is True
        s3.Object(glacier_client.source_s3_bucket, glacier_client.source_key).delete()
        with pytest.raises(Exception) as error:
            glacier_client.download()
        assert 'Error' in error
Inside the except clause the exception is silenced and not propagated, so you need to re-raise it:
    except botocore.exceptions.ClientError as error:
        print(error.response['Error']['Code'])
        raise
A bare raise re-raises the exception that was just caught.
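Putting it together, here is the download method from the question with the fix applied; only the final raise is new (this is an excerpt of the asker's class, so the boto3/botocore imports and the self attributes are assumed from the question):

def download(self):
    s3 = boto3.client('s3')
    try:
        with open(self.flow_cells + '.tar', 'wb') as data:
            s3.download_fileobj(
                self.source_s3_bucket,
                self.source_key,
                data
            )
        return True
    except botocore.exceptions.ClientError as error:
        print(error.response['Error']['Code'])
        raise  # re-raise so callers (and pytest.raises) see the original error

With the exception propagating again, pytest.raises(Exception) in the test will pass, and the message can be checked via the ExceptionInfo value, for example assert 'Error' in str(error.value).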
PS. Shameless plug: I was one of those who asked Guido 20 years ago to add bare raise! :-)
I am trying to annotate a corpus using Syntaxnet. I added the following lines at the end of the /models/syntaxnet/syntaxnet/models/parsey_mcparseface/context.pbtxt file:
input {
  name: 'input_file'
  record_format: 'english-text'
  Part {
    file_pattern: '/home/melvyn/text.txt'
  }
}
output {
  name: 'output_file'
  record_format: 'english-text'
  Part {
    file_pattern: '/home/melvyn/text-tagged.txt'
  }
}
When I run the command:
./demo.sh --input=input_file --output=output_file
I am getting:
./demo.sh: line 31: bazel-bin/syntaxnet/parser_eval: No such file or directory
./demo.sh: line 43: bazel-bin/syntaxnet/parser_eval: No such file or directory
./demo.sh: line 55: bazel-bin/syntaxnet/conll2tree: No such file or directory
According to the answer given here, I changed my demo.sh file, and now I get some errors which say:
[libprotobuf ERROR external/tf/google/protobuf/src/google/protobuf/text_format.cc:291] Error parsing text-format syntaxnet.TaskSpec: 200:8: Message type "syntaxnet.TaskOutput" has no field named "Part".
E external/tf/tensorflow/core/framework/op_segment.cc:53] Create kernel failed: Invalid argument: Could not parse task context at syntaxnet/models/parsey_mcparseface/context.pbtxt
E external/tf/tensorflow/core/common_runtime/executor.cc:333] Executor failed to create kernel. Invalid argument: Could not parse task context at syntaxnet/models/parsey_mcparseface/context.pbtxt
[[Node: DocumentSource = DocumentSource[batch_size=32, corpus_name="stdin-conll", task_context="syntaxnet/models/parsey_mcparseface/context.pbtxt", _device="/job:localhost/replica:0/task:0/cpu:0"]]]
What could be a possible solution?
Though it's not certain, I think you are not running the shell script from the root directory. Please try running it as per the instructions mentioned here.
I hope it helps.
I've tried to use tornado.platform.twisted to integrate the txyam memcached client, but when I try to check that it works, the following error is thrown:
Traceback (most recent call last):
  File "swcomet/tx_memcache_helper.py", line 32, in <module>
    mem_helper = MemcacheHelper()
  File "swcomet/tx_memcache_helper.py", line 19, in __init__
    self.add(4)
  File "/home/rustem/work/sw.services.swcomet.python/venv/local/lib/python2.7/site-packages/tornado/gen.py", line 117, in wrapper
    gen = func(*args, **kwargs)
  File "swcomet/tx_memcache_helper.py", line 25, in add
    self.mem.getPickled(user_id, decompress=True)
  File "/home/rustem/work/sw.services.swcomet.python/venv/lib/python2.7/site-packages/txyam/client.py", line 133, in getPickled
    return self.get(key, **kwargs).addCallback(handleResult, uncompress)
  File "/home/rustem/work/sw.services.swcomet.python/venv/lib/python2.7/site-packages/txyam/client.py", line 27, in wrapper
    func = getattr(self.getClient(key), cmd)
  File "/home/rustem/work/sw.services.swcomet.python/venv/lib/python2.7/site-packages/txyam/client.py", line 48, in getClient
    raise NoServerError, "No connected servers remaining."
txyam.client.NoServerError: No connected servers remaining.
The source code which produces that error:
import tornado.ioloop
import tornado.gen
from txyam.client import YamClient
from swtools.date import _ts
import tornado.platform.twisted

MEMHOSTS = ['127.0.0.1111']
USER_EXPIRATION_TIME = 61


class MemcacheHelper(object):
    def __init__(self, *a, **kw):
        try:
            self.mem = YamClient(["127.0.0.1"])
        except Exception, e:
            print "ERror", e
        self.clients = set()
        self.add(4)

    @tornado.gen.engine
    def add(self, user_id, expire=None):
        self.clients.add(user_id)
        expire = expire or USER_EXPIRATION_TIME
        self.mem.getPickled(user_id, decompress=True)
        print "hmmm"


if __name__ == '__main__':
    print "trying to start on top of IOLOOP"
    ioloop = tornado.ioloop.IOLoop.instance()
    #reactor = TornadoReactor(ioloop)
    mem_helper = MemcacheHelper()
    #mem_helper.add(4)
    ioloop.start()
Please help me solve this problem!
txyam appears not to let you perform any memcache operations until after at least one connection has been established:
def getActiveConnections(self):
    return [factory.client for factory in self.factories if not factory.client is None]

def getClient(self, key):
    hosts = self.getActiveConnections()
    log.msg("Using %i active hosts" % len(hosts))
    if len(hosts) == 0:
        raise NoServerError, "No connected servers remaining."
    return hosts[ketama(key) % len(hosts)]
It attempts to set up these connections right away:
def __init__(self, hosts):
    """
    @param hosts: A C{list} of C{tuple}s containing hosts and ports.
    """
    self.connect(hosts)
But connection setup is asynchronous, and it doesn't expose an event to indicate when at least one connection has been established.
So your code fails because you call add right away, before any connections exist. A good long-term fix would be to file a bug report against txyam, because this isn't a very nice interface. YamClient could have a whenReady method that returns a Deferred that fires when you are actually allowed to use the YamClient instance. Or there could be an alternate constructor that returns a Deferred that fires with the YamClient instance, but only after it can be used.
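For illustration, here is a rough sketch of what such a helper could look like from the outside, polling getActiveConnections() (which txyam does have, as quoted above) until at least one connection exists; the when_ready name is mine, not part of txyam:

from twisted.internet import defer, reactor


def when_ready(client, poll_interval=0.1):
    """Return a Deferred that fires with `client` once it has at least one
    active connection. Polling is a crude substitute for the readiness
    event that txyam does not expose."""
    d = defer.Deferred()

    def check():
        if client.getActiveConnections():
            d.callback(client)
        else:
            reactor.callLater(poll_interval, check)

    check()
    return d

Usage would then be when_ready(YamClient(["127.0.0.1"])).addCallback(lambda client: client.getPickled(4, decompress=True)) instead of calling the client immediately after constructing it.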