Basically, this code reads from a pipe and constantly prints the output without blocking. Here is the whole code:
1) First script:
import sys
import time

if __name__ == '__main__':
    for i in range(5):
        print str(i) + '\n',
        sys.stdout.flush()
        time.sleep(1)
2) Second script:
import fcntl
import os
import sys
import subprocess
from threading import Thread

def log_worker(stdout):
    while True:
        output = non_block_read(stdout).strip()
        if output:
            print output

def non_block_read(output):
    '''Even in a thread, a normal read will block until the buffer is full.'''
    fd = output.fileno()
    fl = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    try:
        return output.read()
    except:
        # nothing available to read yet
        return ''

if __name__ == '__main__':
    mysql_process = subprocess.Popen(['python', '-u', 'flush.py'],
                                     stdin=sys.stdin,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
    thread = Thread(target=log_worker, args=[mysql_process.stdout])
    thread.daemon = True
    thread.start()
    mysql_process.wait()
    thread.join(timeout=1)
I would like to know why it works that way:
1) If I take the Thread away entirely and just call log_worker in main, it also prints everything one by one, but the problem is that it hangs after completion instead of finishing. I read somewhere that the thread is exactly what lets it finish, or more precisely that the thread dies when it is done printing. So why does it work that way? What exactly does the thread do here, and how?
2) If I keep the thread but remove mysql_process.wait() and thread.join(), it prints nothing. Why? I read that Popen.wait() waits for its child process to terminate, then sets and returns the returncode attribute. What is the child process here, and why/how is it a child?
3) If I only remove thread.join(timeout=1), it finishes but with the error "Exception in thread Thread-1 (most likely raised during interpreter shutdown)". Why? What role does .join() play here?
4) I read the documentation of the functions used in non_block_read, but I am still confused. Okay, it is obvious that they take a file descriptor and set it to non-blocking. What confuses me is what I can use those functions on: I understand they work on files, but how come they work on stdout? It's not a file, it's a stream, isn't it?
All this I am doing in order to execute a script with subprocess.Popen from a Tornado app and constantly send its output to the client/myself without blocking. If anyone can help me with that I would really appreciate it, because I can't imagine how to get the output from the thread in a way that lets me constantly pass it to self.send in tornadio2...
def on_message(self, message):
    # list = subprocess.Popen([r"ls", "-l"], stdout=subprocess.PIPE)
    # list_stdout = list.communicate()[0]
    for i in range(1, 10):
        time.sleep(1)
        self.send(i)
With the dev version (3.2), that's really easy:
from tornado.web import RequestHandler, asynchronous
from tornado.process import Subprocess

class home(RequestHandler):
    @asynchronous
    def get(self):
        self.sp = Subprocess('date && sleep 5 && date', shell=True,
                             stdout=Subprocess.STREAM)
        self.sp.set_exit_callback(self.next)

    def next(self, s):
        self.sp.stdout.read_until_close(self.finish)

application = ...
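If you want to push the output to the client as it arrives rather than all at once on exit, here is a minimal line-by-line sketch built on the same Subprocess.STREAM API (callback-style, as in Tornado 3.x; the handler name and the flush.py command are placeholders taken from the question):
from tornado.web import RequestHandler, asynchronous
from tornado.process import Subprocess
from tornado.iostream import StreamClosedError

class StreamHandler(RequestHandler):
    @asynchronous
    def get(self):
        # -u keeps the child's stdout unbuffered, as in the question
        self.sp = Subprocess(['python', '-u', 'flush.py'],
                             stdout=Subprocess.STREAM)
        self.sp.set_exit_callback(lambda code: self.finish())
        self.read_line()

    def read_line(self):
        try:
            self.sp.stdout.read_until('\n', self.on_line)
        except StreamClosedError:
            pass  # child exited; the exit callback finishes the request

    def on_line(self, line):
        self.write(line)   # send this chunk to the client
        self.flush()       # don't wait for the response to complete
        self.read_line()   # schedule the next read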
I have a load of tests which I want to rerun if a particular exception occurs. The reason is that I am making real API calls to a server, and sometimes I hit the API's rate limit, in which case I want to wait and try again.
However, I am also using a pytest fixture to make each test run several times, because I am sending requests to different servers (the actual use case is different cryptocurrency exchanges).
Using pytest-rerunfailures comes very close to what I need, except that I can't see how to inspect the exception from the last test run in the rerun condition.
Below is some code which shows what I am trying to achieve, but obviously I don't want to write code like this for every test.
@pytest_asyncio.fixture(
    params=EXCHANGE_NAMES,
)
async def client(request):
    exchange_name = request.param
    exchange_client = get_exchange_client(exchange_name)
    return exchange_client

def test_something(client):
    test_something.count += 1
    ### This block is the code I don't want to write for every test
    try:
        result = client.do_something()
    except RateLimitException:
        if test_something.count <= 3:
            sleep_duration = get_sleep_duration(client)
            time.sleep(sleep_duration)
            # run the same test again
            test_something(client)
        else:
            raise
    expected = [1, 2, 3]
    assert result == expected

test_something.count = 0  # initialise the retry counter on the function object
You can use the retry library to wrap your actual code in:
from retry import retry

@pytest_asyncio.fixture(
    params=EXCHANGE_NAMES,
    autouse=True,
)
async def client(request):
    exchange_name = request.param
    exchange_client = get_exchange_client(exchange_name)
    return exchange_client

def test_something(client):
    actual_test_something(client)

@retry(RateLimitException, tries=3, delay=2)
def actual_test_something(client):
    '''Retry on RateLimitException, raise the error after 3 attempts, sleep 2 seconds between attempts.'''
    result = client.do_something()
    expected = [1, 2, 3]
    assert result == expected
The code looks much cleaner this way.
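If you'd rather not add a dependency, the same idea fits in a few lines of plain Python (a minimal sketch of the pattern, not the retry library's actual implementation):
import time
import functools

def retry_on(exc_type, tries=3, delay=2):
    """Re-run the wrapped function on exc_type, up to `tries` attempts."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(1, tries + 1):
                try:
                    return func(*args, **kwargs)
                except exc_type:
                    if attempt == tries:
                        raise  # out of attempts: propagate the exception
                    time.sleep(delay)
        return wrapper
    return decorator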
So I have some code which uses xlwings to write data to an Excel (.xlsm) file.
After I've done writing, I press a certain button to calculate.
Sometimes an error/message pops up in Excel, which is fine, but I want to catch this message in Python and write it to a log / print it later.
I also need to interact with this message, in this case pressing "Ok" in the message box.
[Attached image of the message box]
So guys, I've been able to solve this with an external Python library.
Here is the code:
from pywinauto import application as autoWin
app = autoWin.Application()
con = app.connect(title = 'Configuration Error')
msgText = con.Dialog.Static2.texts()[0]
con.Dialog.Button.click()
con.Dialog.Button.click()
print(msgText)
Basically, what it does is connect to the app and search for the window title, in this case "Configuration Error".
It needs to click twice in order to press "Ok" and close the message.
Secondly, it gets the text from the message and can forward it wherever I want.
An important part to remember, though: because this should be an automated task, it should run concurrently, which means threading.
So, a simple Thread subclass is below:
import time
from threading import Thread, Event
from pywinauto import application as autoWin

class ButtonClicker(Thread):
    def __init__(self):
        Thread.__init__(self)
        self._stop_event = Event()

    def stop(self):
        self._stop_event.set()

    def stopped(self):
        return self._stop_event.is_set()

    def run(self) -> None:
        while True:
            time.sleep(3)
            try:
                app = autoWin.Application()
                con = app.connect(title='Configuration Error')
                msg_data = con.Dialog.Static2.texts()[0]
                while True:
                    con.Dialog.Button.click()
                    # con.Dialog.Button.click()
                    # print(msg_data)
                    return msg_data
            except Exception as e:
                print("Excel didn't get stuck")
                break
and of course to actually use it:
event_handle = ButtonClicker()
event_handle.start()
Some manipulation is needed to make this work in different codebases/scenarios, but I hope it will help others in the future, because this seems to be a very common question.
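One caveat worth noting: a Thread's run() return value is discarded by the threading module, so the `return msg_data` above is lost to the caller. A minimal sketch of one way around that, storing the text on the instance (the class and attribute names here are my own):
from threading import Thread

class ButtonClickerWithResult(Thread):
    """Variant of the class above that keeps the captured text."""
    def __init__(self):
        Thread.__init__(self)
        self.msg_data = None  # filled in by run()

    def run(self):
        # ... same connect-and-click logic as above ...
        self.msg_data = 'text captured from the dialog'  # store instead of `return`

clicker = ButtonClickerWithResult()
clicker.start()
clicker.join()
print(clicker.msg_data)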
@Danny's solution, i.e. pywinauto plus a thread, works perfectly on my local machine, but it seems it can't catch the message box when Excel is running in server mode, e.g. in my case the automation is triggered locally but started by a system service installed on the server:
pywinauto.findwindows.ElementNotFoundError:
{'title': '<my-wanted-title>', 'backend': 'win32', 'visible_only': False}
I finally solved it with another third-party Python library, pywin32, so I'm providing a backup solution here.
'''
Keep finding a message box with the specified title and clicking its button
to close it, until stopped by the main thread.
'''
import time
from threading import Thread, Event
import win32gui
import win32con

class ButtonClicker(Thread):
    def __init__(self, title: str, interval: int):
        Thread.__init__(self)
        self._title = title
        self._interval = interval
        self._stop_event = Event()

    def stop(self):
        '''Stop thread.'''
        self._stop_event.set()

    @property
    def stopped(self):
        return self._stop_event.is_set()

    def run(self):
        while not self.stopped:
            try:
                time.sleep(self._interval)
                self._close_msgbox()
            except Exception as e:
                print(e, flush=True)

    def _close_msgbox(self):
        # find the top window by title
        hwnd = win32gui.FindWindow(None, self._title)
        if not hwnd:
            return
        # find the child button
        h_btn = win32gui.FindWindowEx(hwnd, None, 'Button', None)
        if not h_btn:
            return
        # show the button text
        text = win32gui.GetWindowText(h_btn)
        print(text)
        # click the button
        win32gui.PostMessage(h_btn, win32con.WM_LBUTTONDOWN, None, None)
        time.sleep(0.2)
        win32gui.PostMessage(h_btn, win32con.WM_LBUTTONUP, None, None)
        time.sleep(0.2)

if __name__ == '__main__':
    t = ButtonClicker('Configuration Error', 3)
    t.start()
    time.sleep(10)
    t.stop()
In Python >= 3.5 we can pass optional stdout, stdin, and stderr arguments to subprocess.run().
Per the docs:
Valid values are PIPE, DEVNULL, an existing file descriptor (a positive integer),
an existing file object, and None. PIPE indicates that a new pipe to the child
should be created
I want to support passing through (at least) None or existing file objects whilst managing resources pythonically.
How should I manage the optional file resources in something like:
import subprocess

def wraps_subprocess(args=['ls', '-l'], stdin=None, stdout=None):
    # ... do important stuff
    subprocess.run(args=args, stdin=stdin, stdout=stdout)
A custom contextmanager (idea taken from this answer) seems to work:
import contextlib

@contextlib.contextmanager
def awesome_open(path_or_file_or_none, mode='rb'):
    if isinstance(path_or_file_or_none, str):
        file_ = needs_closed = open(path_or_file_or_none, mode)
    else:
        file_ = path_or_file_or_none
        needs_closed = None
    try:
        yield file_
    finally:
        if needs_closed:
            needs_closed.close()
which would be used like:
import subprocess

def wraps_subprocess(args=['ls', '-l'], stdin=None, stdout=None):
    # ... do important stuff
    with awesome_open(stdin, mode='rb') as fin, awesome_open(stdout, mode='wb') as fout:
        subprocess.run(args=args, stdin=fin, stdout=fout)
But I think there is probably a better way.
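One stdlib alternative worth considering is contextlib.ExitStack, which registers cleanup only for the files this function itself opened. A sketch under the same assumptions (strings are paths to be opened, anything else is passed through unchanged):
import contextlib
import subprocess

def wraps_subprocess(args=['ls', '-l'], stdin=None, stdout=None):
    with contextlib.ExitStack() as stack:
        # open (and later close) only what we were given as a path
        fin = stack.enter_context(open(stdin, 'rb')) if isinstance(stdin, str) else stdin
        fout = stack.enter_context(open(stdout, 'wb')) if isinstance(stdout, str) else stdout
        subprocess.run(args=args, stdin=fin, stdout=fout)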
I want to create the following flow using the celery configuration/API:
Send TaskA(argB) only if the celery queue has no TaskA(argB) already pending.
Is it possible? How?
You can make your job aware of other tasks with some sort of memoization. If you use a cache-control key (redis, memcached, /tmp, whatever is handy), you can make execution depend on that key. I'm using redis as an example.
from redis import Redis

@app.task
def run_only_one_instance(params):
    try:
        sentinel = Redis().incr("run_only_one_instance_sentinel")
        if sentinel == 1:
            # I am the legitimate running task
            perform_task()
        else:
            # Do you want to do something else on task duplicate?
            pass
        Redis().decr("run_only_one_instance_sentinel")
    except Exception as e:
        Redis().decr("run_only_one_instance_sentinel")
        # potentially log error with Sentry?
        # decrement the counter to ensure tasks can run
        # or: raise e
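Note the incr/decr sentinel can stay stuck if a worker is killed between the two calls. A hedged alternative sketch using redis's atomic SET with NX and an expiry, so the lock always clears itself (the key name and timeout here are my own):
from redis import Redis

@app.task
def run_only_one_instance(params):
    r = Redis()
    # set the key only if it does not already exist, auto-expiring after 10 minutes
    if r.set("run_only_one_instance_lock", 1, nx=True, ex=600):
        try:
            perform_task()
        finally:
            r.delete("run_only_one_instance_lock")
    # else: a duplicate is already queued/running, so skip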
I cannot think of a way except to:
Retrieve all executing and scheduled tasks via celery inspect.
Iterate through them to see if your task is there.
Check this SO question to see how the first point is done.
Good luck!
I don't know whether this will help you more than the other answers, but here goes my approach, following the same idea given by srj. I needed a way to block my server from launching tasks with the same id into the queue, so I made a general function to help me.
def is_task_active_or_registered(app, task_id):
    i = app.control.inspect()
    # inspect() returns None when no workers respond, hence the `or {}`
    active_dict = i.active() or {}
    scheduled_dict = i.scheduled() or {}
    tasks_ids_set = set()
    for _dict in [active_dict, scheduled_dict]:
        for k in _dict.keys():
            for task in _dict[k]:
                tasks_ids_set.add(task['id'])
    return task_id in tasks_ids_set
So, I use it like this:
In the context where my celery-app object is available, I define:
def check_task_can_not_run(task_id):
    return is_task_active_or_registered(app=celery, task_id=task_id)
And so, from my client request, I call this check_task_can_not_run(...) and block the task from being launched if it returns True.
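For illustration, the call site might look like this (the task and id-derivation names are hypothetical; celery's apply_async does accept an explicit task_id):
# hypothetical client-side gate before enqueueing
task_id = make_task_id(user_id, action)  # however you derive a stable id
if not check_task_can_not_run(task_id):
    my_task.apply_async(args=[user_id], task_id=task_id)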
I was facing a similar problem: Beat was creating duplicates in my queue. I wanted to use expires, but this feature isn't working properly (https://github.com/celery/celery/issues/4300).
So here is a scheduler which checks whether a task has already been enqueued (based on the task name).
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals

import json
from heapq import heappop, heappush

from celery.beat import event_t
from celery.schedules import schedstate
from django_celery_beat.schedulers import DatabaseScheduler
from typing import List, Optional
from typing import TYPE_CHECKING

from your_project import celery_app

if TYPE_CHECKING:
    from celery.beat import ScheduleEntry


def is_task_in_queue(task, queue_name=None):
    # type: (str, Optional[str]) -> bool
    queues = [queue_name] if queue_name else celery_app.amqp.queues.keys()
    for queue in queues:
        if task in get_celery_queue_tasks(queue):
            return True
    return False


def get_celery_queue_tasks(queue_name):
    # type: (str) -> List[str]
    with celery_app.pool.acquire(block=True) as conn:
        tasks = conn.default_channel.client.lrange(queue_name, 0, -1)
        decoded_tasks = []
        for task in tasks:
            j = json.loads(task)
            task = j['headers']['task']
            if task not in decoded_tasks:
                decoded_tasks.append(task)
        return decoded_tasks


class SmartScheduler(DatabaseScheduler):
    """
    Smart means it prevents duplication of tasks in queues.
    """
    def is_due(self, entry):
        # type: (ScheduleEntry) -> schedstate
        is_due, next_time_to_run = entry.is_due()
        if (
            not is_due or  # duplicate wouldn't be created
            not is_task_in_queue(entry.task)  # not in queue, so let it run
        ):
            return schedstate(is_due, next_time_to_run)

        # The task should be run (is_due) but it is already present in the
        # queue (is_task_in_queue), so skip this tick and reschedule.
        H = self._heap
        if not H:
            return schedstate(False, self.max_interval)

        event = H[0]
        verify = heappop(H)
        if verify is event:
            next_entry = self.reserve(entry)
            heappush(H, event_t(self._when(next_entry, next_time_to_run), event[1], next_entry))
        else:
            heappush(H, verify)
            next_time_to_run = min(verify[0], next_time_to_run)

        return schedstate(False, min(next_time_to_run, self.max_interval))
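To actually use it, point beat at the class. With Django settings this is typically something like the following (the module path is an assumption based on where you put the class):
# settings.py - assuming SmartScheduler lives in your_project/schedulers.py
CELERY_BEAT_SCHEDULER = 'your_project.schedulers:SmartScheduler'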
What's the best way to detect an application crash in XP (it produces the same pair of 'error' windows each time, each with the same window title) and then restart it?
I'm especially interested to hear of solutions that use minimal system resources as the system in question is quite old.
I had thought of using a scripting language like AutoIt (http://www.autoitscript.com/autoit3/), and perhaps triggering a 'detector' script every few minutes?
Would this be better done in Python, Perl, PowerShell or something else entirely?
Any ideas, tips, or thoughts much appreciated.
EDIT: It doesn't actually crash (i.e. exit/terminate - thanks @tialaramex). It displays a dialog waiting for user input, followed by another dialog waiting for further user input, then it actually exits. It's these dialogs that I'd like to detect and deal with.
The best way is to use a named mutex.
Start your application.
Create a new named mutex and take ownership over it.
Start a new process (a process, not a thread) or a new application, whichever you prefer.
From that process/application, try to acquire the mutex. The process will block.
When the application finishes, release the mutex (signal it).
The "control" process will only acquire the mutex if either the application finishes or the application crashes. Test the resulting state after acquiring the mutex: if the application crashed, it will be WAIT_ABANDONED.
Explanation: when a thread finishes without releasing the mutex, any other process waiting for it can acquire it, but it will get WAIT_ABANDONED as the return value, meaning the mutex was abandoned and therefore the state of the section it protected may be unsafe.
This way your second app won't consume any CPU cycles, since it just keeps waiting for the mutex (and that's entirely handled by the operating system). A sketch of the idea follows below.
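A rough ctypes sketch of the watchdog side (the mutex name is made up and error handling is omitted; this illustrates the WAIT_ABANDONED idea, not production code):
import ctypes

kernel32 = ctypes.windll.kernel32

SYNCHRONIZE    = 0x00100000
INFINITE       = 0xFFFFFFFF
WAIT_OBJECT_0  = 0x00000000
WAIT_ABANDONED = 0x00000080

# The watched application creates and owns this mutex at startup:
#   kernel32.CreateMutexW(None, True, u"Global\\MyAppAliveMutex")
# and calls ReleaseMutex on a clean exit.

handle = kernel32.OpenMutexW(SYNCHRONIZE, False, u"Global\\MyAppAliveMutex")
result = kernel32.WaitForSingleObject(handle, INFINITE)
if result == WAIT_ABANDONED:
    print("Application crashed (mutex abandoned) - restart it")
elif result == WAIT_OBJECT_0:
    print("Application exited cleanly")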
How about creating a wrapper application that launches the faulty app as a child and waits for it? If the exit code of the child indicates an error, then restart it, else exit.
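A minimal sketch of that wrapper in Python (the child's executable name and the meaning of its exit codes are assumptions):
import subprocess

# restart the child whenever it exits with a nonzero code
while True:
    returncode = subprocess.call(["faulty_app.exe"])
    if returncode == 0:
        break  # clean exit: stop watching
    print("child exited with %d, restarting" % returncode)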
I think the main problem is that Dr. Watson displays a dialog and keeps your process alive.
You can write your own debugger using the Windows API and run the crashing application from there.
This will prevent other debuggers from catching the crash of your application, and you could also catch the Exception event.
Since I have not found any sample code, I have written this quick-and-dirty Python sample. I am not sure how robust it is; especially the declaration of DEBUG_EVENT could be improved.
from ctypes import windll, c_int, Structure, pointer
import subprocess

WaitForDebugEvent = windll.kernel32.WaitForDebugEvent
ContinueDebugEvent = windll.kernel32.ContinueDebugEvent

DBG_CONTINUE = 0x00010002L
DBG_EXCEPTION_NOT_HANDLED = 0x80010001L

event_names = {
    1: 'EXCEPTION_DEBUG_EVENT',
    2: 'CREATE_THREAD_DEBUG_EVENT',
    3: 'CREATE_PROCESS_DEBUG_EVENT',
    4: 'EXIT_THREAD_DEBUG_EVENT',
    5: 'EXIT_PROCESS_DEBUG_EVENT',
    6: 'LOAD_DLL_DEBUG_EVENT',
    7: 'UNLOAD_DLL_DEBUG_EVENT',
    8: 'OUTPUT_DEBUG_STRING_EVENT',
    9: 'RIP_EVENT',
}

class DEBUG_EVENT(Structure):
    _fields_ = [
        ('dwDebugEventCode', c_int),
        ('dwProcessId', c_int),
        ('dwThreadId', c_int),
        ('u', c_int * 20)]

def run_with_debugger(args):
    # creationflags=1 is DEBUG_PROCESS: start the child under this debugger
    proc = subprocess.Popen(args, creationflags=1)
    event = DEBUG_EVENT()
    while True:
        if WaitForDebugEvent(pointer(event), 10):
            print event_names.get(event.dwDebugEventCode,
                                  'Unknown Event %s' % event.dwDebugEventCode)
            ContinueDebugEvent(event.dwProcessId, event.dwThreadId, DBG_CONTINUE)
        retcode = proc.poll()
        if retcode is not None:
            return retcode

run_with_debugger(['python', 'crash.py'])
I realize that you're dealing with Windows XP, but for people in a similar situation under Vista, there are new crash recovery APIs available. Here's a good introduction to what they can do.
Here is a slightly improved version.
In my test, the previous code ran into an infinite loop when the faulty exe generated an "access violation".
I'm not totally satisfied with my solution, because I have no clear criterion for which exceptions should be continued and which cannot be (ExceptionFlags is of no help).
But it works on the example I ran.
Hope it helps,
Vivian De Smedt
from ctypes import windll, c_uint, c_void_p, Structure, Union, pointer
import subprocess

WaitForDebugEvent = windll.kernel32.WaitForDebugEvent
ContinueDebugEvent = windll.kernel32.ContinueDebugEvent

DBG_CONTINUE = 0x00010002L
DBG_EXCEPTION_NOT_HANDLED = 0x80010001L

event_names = {
    1: 'EXCEPTION_DEBUG_EVENT',
    2: 'CREATE_THREAD_DEBUG_EVENT',
    3: 'CREATE_PROCESS_DEBUG_EVENT',
    4: 'EXIT_THREAD_DEBUG_EVENT',
    5: 'EXIT_PROCESS_DEBUG_EVENT',
    6: 'LOAD_DLL_DEBUG_EVENT',
    7: 'UNLOAD_DLL_DEBUG_EVENT',
    8: 'OUTPUT_DEBUG_STRING_EVENT',
    9: 'RIP_EVENT',
}

EXCEPTION_MAXIMUM_PARAMETERS = 15

EXCEPTION_BREAKPOINT = 0x80000003
EXCEPTION_DATATYPE_MISALIGNMENT = 0x80000002
EXCEPTION_ACCESS_VIOLATION = 0xC0000005
EXCEPTION_ILLEGAL_INSTRUCTION = 0xC000001D
EXCEPTION_ARRAY_BOUNDS_EXCEEDED = 0xC000008C
EXCEPTION_INT_DIVIDE_BY_ZERO = 0xC0000094
EXCEPTION_INT_OVERFLOW = 0xC0000095
EXCEPTION_STACK_OVERFLOW = 0xC00000FD

class EXCEPTION_RECORD(Structure):
    _fields_ = [
        ('ExceptionCode', c_uint),
        ('ExceptionFlags', c_uint),
        ('ExceptionRecord', c_void_p),
        ('ExceptionAddress', c_void_p),
        ('NumberParameters', c_uint),
        ('ExceptionInformation', c_void_p * EXCEPTION_MAXIMUM_PARAMETERS),
    ]

class EXCEPTION_DEBUG_INFO(Structure):
    _fields_ = [
        ('ExceptionRecord', EXCEPTION_RECORD),
        ('dwFirstChance', c_uint),
    ]

class DEBUG_EVENT_INFO(Union):
    _fields_ = [
        ('Exception', EXCEPTION_DEBUG_INFO),
    ]

class DEBUG_EVENT(Structure):
    _fields_ = [
        ('dwDebugEventCode', c_uint),
        ('dwProcessId', c_uint),
        ('dwThreadId', c_uint),
        ('u', DEBUG_EVENT_INFO)
    ]

def run_with_debugger(args):
    proc = subprocess.Popen(args, creationflags=1)
    event = DEBUG_EVENT()
    num_exception = 0
    while True:
        if WaitForDebugEvent(pointer(event), 10):
            print event_names.get(event.dwDebugEventCode, 'Unknown Event %s' % event.dwDebugEventCode)
            if event.dwDebugEventCode == 1:  # EXCEPTION_DEBUG_EVENT
                num_exception += 1
                exception_code = event.u.Exception.ExceptionRecord.ExceptionCode
                if exception_code == EXCEPTION_BREAKPOINT:
                    # the initial breakpoint raised when the debugger attaches
                    print "Breakpoint:", hex(exception_code)
                else:
                    if exception_code == EXCEPTION_ACCESS_VIOLATION:
                        print "EXCEPTION_ACCESS_VIOLATION"
                    elif exception_code == EXCEPTION_INT_DIVIDE_BY_ZERO:
                        print "EXCEPTION_INT_DIVIDE_BY_ZERO"
                    elif exception_code == EXCEPTION_STACK_OVERFLOW:
                        print "EXCEPTION_STACK_OVERFLOW"
                    else:
                        print "Other exception:", hex(exception_code)
                    break
            ContinueDebugEvent(event.dwProcessId, event.dwThreadId, DBG_CONTINUE)
        retcode = proc.poll()
        if retcode is not None:
            return retcode

run_with_debugger(['crash.exe'])