MicroPython GPIO control function in background

How can I write code that lets a function run in the background at exactly the time I need? When I run this function in a loop, it executes only after the other actions.
import time
from machine import Pin

a_1 = Pin(21, Pin.OUT)
a_2 = Pin(20, Pin.OUT)

def do_something(x):
    print(x)

def some_other_actions():
    time.sleep(10)

def calc_one_two():
    return 1, 2

def func(a, b, on_tmr=2, off_tmr=1):
    a_1.on()
    do_something(a)
    time.sleep_ms(on_tmr)   # sleep for 2 ms
    a_1.off()
    time.sleep_ms(off_tmr)  # sleep for 1 ms
    a_2.on()
    do_something(b)
    time.sleep_ms(on_tmr)   # sleep for 2 ms
    a_2.off()
    time.sleep_ms(off_tmr)  # sleep for 1 ms

while True:
    one, two = calc_one_two()
    func(one, two)
    some_other_actions()  # actions that need some time

uasyncio is what you should look for. See the documentation here:
https://docs.micropython.org/en/latest/library/uasyncio.html
Below is a minimal sample that blinks two LEDs asynchronously, which is exactly what you are doing:
import uasyncio

async def blink(led, period_ms):
    while True:
        led.on()
        await uasyncio.sleep_ms(5)
        led.off()
        await uasyncio.sleep_ms(period_ms)

async def main(led1, led2):
    uasyncio.create_task(blink(led1, 700))
    uasyncio.create_task(blink(led2, 400))
    await uasyncio.sleep_ms(10_000)

# Running on a generic board
from machine import Pin
uasyncio.run(main(Pin(21, Pin.OUT), Pin(20, Pin.OUT)))
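
As a further illustration, here is an untested sketch of how the question's own func() and some_other_actions() might look as coroutines, so the pin pulses keep their timing while the slow work runs:

import uasyncio
from machine import Pin

a_1 = Pin(21, Pin.OUT)
a_2 = Pin(20, Pin.OUT)

async def pulse(pin, value, on_tmr=2, off_tmr=1):
    pin.on()
    print(value)                     # stands in for do_something()
    await uasyncio.sleep_ms(on_tmr)  # yields instead of blocking
    pin.off()
    await uasyncio.sleep_ms(off_tmr)

async def func(a, b):
    await pulse(a_1, a)
    await pulse(a_2, b)

async def main():
    while True:
        one, two = 1, 2                       # calc_one_two()
        uasyncio.create_task(func(one, two))  # runs in the background
        await uasyncio.sleep(10)              # some_other_actions(), non-blocking

uasyncio.run(main())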

Related

uPY uart not communicating correctly with EG25-G

I had a motor controller connected to GP0 and GP1, so I know they work; however, I can't seem to get a response from the SIM controller. Without the Pico attached to the board I can get it to work, but when I add the Pico it seems like it won't send AT commands or translate received data, if the Pico is getting any at all. I have tried to run the code line by line in a live session, and all I get is a single number that is equal to the number of letters inside the string that I am sending to the SIM controller, i.e. uart.write(bytearray(b'ATE1\r\n')) would return >>> 6, 6 being the number of characters in the code after b. I'm ordering a new Pico to see if maybe it was my sub-par soldering, but in the meantime I'd like to see if anyone more experienced than I am can point out an error.
import machine
import os
import utime
import time
import binascii
from machine import UART

pwr_enable = 22  # EG25_4G power key connected on GP22
uart_port = 0
uart_baud = 115200

# Initialize UART0
uart = machine.UART(uart_port, uart_baud)
print(os.uname())

def wait_resp_info(timeout=3000):
    prvmills = utime.ticks_ms()
    info = b""
    # ticks_diff() handles the tick counter wrapping around
    while utime.ticks_diff(utime.ticks_ms(), prvmills) < timeout:
        if uart.any():
            info = b"".join([info, uart.read(1)])
    print(info.decode())
    return info

def Check_and_start():  # Initialize the SIM module
    while True:
        uart.write(bytearray(b'ATE1\r\n'))
        utime.sleep(10)
        uart.write(bytearray(b'AT\r\n'))
        rec_temp = wait_resp_info()
        print(wait_resp_info())
        print(rec_temp)
        print(rec_temp.decode())
        utime.sleep(10)
        if 'OK' in rec_temp.decode():
            print('OK response from AT command\r\n' + rec_temp.decode())
            break
        else:
            power = machine.Pin(pwr_enable, machine.Pin.OUT)
            power.value(1)
            utime.sleep(2)
            power.value(0)
            print('No response, restarting\r\n')
            utime.sleep(10)

def Network_check():  # Network connectivity check
    for i in range(1, 3):
        # Send_command() is defined elsewhere in the original code
        if Send_command("AT+CGREG?", "0,1") == 1:
            print('Connected\r\n')
            break
        else:
            print('Device is NOT connected\r\n')
            utime.sleep(2)
            continue

def Str_to_hex_str(string):
    str_bin = string.encode('utf-8')
    return binascii.hexlify(str_bin).decode('utf-8')

Check_and_start()
Network_check()
The response is:
>>> Check_and_start()
b''
b'\x00\x00'
No response, restarting
A new Pico fixed my issue; I believe my inadequate soldering skills created it. The symptoms were that no UART data was being transmitted or received through UART pins 0 and 1. The solution was to swap in a new Pico board in place of the old one; the same code was uploaded and ran successfully the first time.
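
Two notes for anyone debugging similar symptoms: uart.write() returns the number of bytes written, so the 6 seen in the question is expected behavior rather than a modem response. And a loopback test can rule out the Pico itself: jumper GP0 (TX) to GP1 (RX) and check that written bytes come back. A rough sketch, assuming UART0 at 115200 baud:

from machine import UART, Pin
import utime

# Loopback test: physically jumper GP0 (TX) to GP1 (RX) first.
uart = UART(0, baudrate=115200, tx=Pin(0), rx=Pin(1))
n = uart.write(b'ATE1\r\n')  # returns the number of bytes written (6)
utime.sleep_ms(100)
print(n, uart.read())        # should print: 6 b'ATE1\r\n' if the pins work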

TTN (The Things Network) counter and Pycom Lopy4 problem

I have a problem with the counter in TTN. The problem appeared after I added the machine.deepsleep function on my LoPy4. I have used the nvram_save() and nvram_restore() functions, but the counter in TTN still does not increment.
(screenshot of the TTN console omitted)
So, what's the problem in this case?
This is my code:
import ads1x15 as extADC
import gc
import pycom
import socket
from network import LoRa
from uModBus.serial import Serial
import machine
import ubinascii
import utime
from machine import RTC, I2C
from machine import Pin

pycom.heartbeat(False)
rtc = RTC()
i2c = I2C(0, I2C.MASTER, pins=('P3', 'P4'), baudrate=100000)
adc = extADC.ADS1115(i2c, gain=1)
pinTrig = Pin('P23', mode=Pin.OUT)

# LoRa socket connection, with two conditions
if machine.reset_cause() == machine.DEEPSLEEP_RESET:
    pinTrig.value(1)  # enable high (12 V)
    print("WOKE UP FROM DEEPSLEEP 3 MINUTES !")
    utime.sleep(1)
    lora = LoRa(mode=LoRa.LORAWAN, region=LoRa.AS923)
    lora.nvram_restore()  # NVRAM restore function
    s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)
    s.setsockopt(socket.SOL_LORA, socket.SO_DR, 5)
    s.setblocking(False)
    s.bind(1)
else:
    pinTrig.value(1)
    utime.sleep(1)
    print("I'M PowerOn by Humans or Hard reset !")
    lora = LoRa(mode=LoRa.LORAWAN, region=LoRa.AS923)
    lora.nvram_restore()  # NVRAM restore function
    app_eui = ubinascii.unhexlify('********************')
    app_key = ubinascii.unhexlify('-----------------------------')
    lora.join(activation=LoRa.OTAA, auth=(app_eui, app_key), timeout=0)
    while not lora.has_joined():
        utime.sleep(2.5)
        print('Not yet joined...')
    print('Joined')
    s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)
    s.setsockopt(socket.SOL_LORA, socket.SO_DR, 5)
    s.setblocking(True)
    s.bind(1)

### Begin sensor reading and sending function ##################
def read_data_Sensor():
    pass  # (body elided in the original post)
### End sensor reading and sending function ####################

try:
    read_data_Sensor()
    lora.nvram_save()
    utime.sleep(1)
    pinTrig.value(0)
    print("DeepSleep Mode")
    utime.sleep(1)
    machine.deepsleep(180000)
except OSError:
    print("Terjadi Error - Restart")  # Indonesian: "An error occurred - Restart"
    s.send(b"\xff\xff")
    utime.sleep(1)
    machine.reset()
My understanding of the nvram_save/restore methods is that they restore the full state of the LoRa stack, including the 'joined / not joined' status.
If you explicitly do the join() every time, this both wastes energy and time in the join exchange, and the process will always reset the counters back to 0.
I think your code should test lora.has_joined() after the nvram_restore(), and only do the join procedure if this returns False.
BTW, I have also experienced issues with Pycom and nvram_save/restore with TTN v3.
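A rough sketch of that suggestion (untested), applied to the question's power-on branch:

lora = LoRa(mode=LoRa.LORAWAN, region=LoRa.AS923)
lora.nvram_restore()  # restores session keys and frame counters, if any were saved

if not lora.has_joined():  # only join when no session was restored
    app_eui = ubinascii.unhexlify('********************')
    app_key = ubinascii.unhexlify('-----------------------------')
    lora.join(activation=LoRa.OTAA, auth=(app_eui, app_key), timeout=0)
    while not lora.has_joined():
        utime.sleep(2.5)
        print('Not yet joined...')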

I use a celery beat worker to create a new process that uses billiard and is set daemonic, but daemonic is not working

billiard version 3.5.0.5
celery 4.0.2
Steps to reproduce
I want to control long-running beat tasks, e.g. if a task has not finished yet, do not start a new process.
import time
from functools import wraps

from billiard import Process

# logger and get_redis_conn() are assumed to be defined elsewhere in the project.

def lock_beat_task(gap_time, flush_time):
    def decorate(func):
        """
        For celery periodic tasks: prevents the previous run from being started
        again while it has not finished yet.
        Problem: celery beat is similar to crontab; it schedules on a fixed
        interval regardless of whether the previous run has finished.
        Approach: with the help of a cache, the key is the function name; each
        time the task is scheduled we check whether it is already executing,
        and the key is deleted after execution.
        :param func:
        :return:
        """
        @wraps(func)
        def wrapper(*args, **kwargs):
            key_name = func.__name__
            logger.info('++++++++{} enter +++++++.'.format(key_name))
            monitor = BeatWokerMonitor(key_name, gap_time, flush_time)
            mo_process = monitor.monitor()
            if mo_process:
                try:
                    logger.info('++++++++{} is running.++++++++'.format(key_name))
                    func(*args, **kwargs)
                    logger.info('{} is graceful end.'.format(key_name))
                except KeyboardInterrupt:
                    logger.info('{} KeyboardInterrupt reset succ'.format(key_name))
                finally:
                    monitor.reset()
                    # mo_process.join()
            else:
                logger.info('{} is running or gap time is not over.'.format(key_name))
            logger.info('{} is end!---.'.format(key_name))
        return wrapper
    return decorate
class BeatWokerMonitor(object):
    """
    Used to maintain/monitor the health status of the beat worker; call monitor().
    If there is no key_name in redis, or the time difference between the current
    time and the value stored under key_name is greater than gap_time, create a
    monitor daemon. This daemon is responsible for refreshing the time stored
    under key_name at a fixed interval. monitor() returns the daemon process
    (meaning the task should run); otherwise it returns None.
    All times are in seconds.
    """

    def __init__(self, key_name, gap_time, flush_time):
        self.key_name = key_name
        self.gap_time = gap_time
        self.flush_time = flush_time
        self.db_redis = get_redis_conn(11)

    def start_monitor(self):
        flush_key_process = Process(
            target=self.flush_redis_key_gap_time,
            name='{}_monitor'.format(self.key_name),
            daemon=True)
        flush_key_process.start()
        return flush_key_process

    def monitor(self):
        rd_key_value = self.get_float_rd_key_value()
        if not rd_key_value:
            v = time.time()
            self.db_redis.set(self.key_name, v)
            return self.start_monitor()
        if time.time() - rd_key_value > self.gap_time:
            return self.start_monitor()

    def get_float_rd_key_value(self):
        value = self.db_redis.get(self.key_name)
        if value:
            return float(value)
        else:
            return 0

    def flush_redis_key_gap_time(self):
        old_time = self.get_float_rd_key_value()
        logger.info('{} start flush, now is {}'.format(self.key_name, old_time))
        while 1:
            if time.time() - old_time > self.flush_time:
                now = time.time()
                self.db_redis.set(self.key_name, now)
                old_time = now
                logger.info('{} monitor flush time {}'.format(self.key_name, now))
            else:
                logger.info('{} not flush {} , {}'.format(self.key_name, time.time() - old_time, self.flush_time))

    def reset(self):
        self.db_redis.delete(self.key_name)
And the task code. You can write a short-running task to test with.
@app.task
@lock_beat_task(5*60, 10*3)
@send_update_msg("update")
def beat_update_all():
    """
    :return:
    """
    from crontab.update import EAllTicket
    eall = EAllTicket()
    send_task_nums = eall.run()
    return send_task_nums
Expected behavior
I want this to run in the background; I cannot use multiprocessing to create the child process, so I use billiard instead.
I expect that once beat_update_all() has finished, the monitor process will kill itself.
Actual behavior
beat_update_all() finishes and the monitor process is still running.
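
One observation, offered as an assumption rather than a verified fix: daemon=True only terminates the child automatically when its parent process exits, and the celery worker process stays alive long after the task function returns, so the monitor loop keeps running. A sketch of the wrapper stopping the monitor explicitly:

def wrapper(*args, **kwargs):
    key_name = func.__name__
    monitor = BeatWokerMonitor(key_name, gap_time, flush_time)
    mo_process = monitor.monitor()
    if mo_process:
        try:
            func(*args, **kwargs)
        finally:
            monitor.reset()
            mo_process.terminate()  # stop the flush loop; daemon=True alone won't
            mo_process.join()       # reap the child process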

How can I use pyglet batches to draw scenes or levels

So I'm currently learning pyglet for Python 2.7 and I'm trying to make a simple game that has levels. The first 'scene' would be the title/intro part, the second would be a tutorial of some sort, and the rest are the game levels themselves.
For this, I've created 7 batches (1 intro, 1 tutorial, 5 levels), namely batch, batch1, ..., batch6. I've also created 7 classes, one for each of these batches, that represent the scenes/levels. This is what I've done for the intro batch and class:
batch = pyglet.graphics.Batch()
batch1 = pyglet.graphics.Batch()

class StartState:
    def __init__(self):
        self.welcome = pyglet.text.Label('WELCOME TO', font_name='Arial', font_size=32, color=(200,255,255,255), x=400, y=550, anchor_x='center', anchor_y='center', batch=batch)
        self.title = pyglet.text.Label("MY GAME", font_name='Arial', font_size=32, color=(100,200,170,255), x=400, y=450, anchor_x='center', anchor_y='center', batch=batch)
        self.press = pyglet.text.Label("press 'SPACE' to continue", font_name='Arial', font_size=32, color=(200,255,150,255), x=400, y=250, anchor_x='center', anchor_y='center', batch=batch)

    def update(self, dt):
        if keymap[pyglet.window.key.SPACE]:
            self.welcome.delete()
            self.title.delete()
            self.press.delete()
            states.pop()
            batch1.draw()
The other scenes look like that too. The states list is a list that I use to store my classes/scenes: states = [Level5(), Level4(), ..., TutorialState(), StartState()]. So every time the condition to advance is fulfilled, which in this class is pressing 'SPACE', the window is 'cleared', i.e. the sprites/labels are deleted, and we proceed to the next scene using states.pop() and batch1.draw().
After I've typed these classes, I added this at the end:
@window.event
def on_draw():
    window.clear()
    batch.draw()

def update(dt):
    if len(states):
        states[-1].update(dt)
    else:
        pyglet.app.exit()

states.append(Level5())
states.append(Level4())
states.append(Level3())
states.append(Level2())
states.append(Level1())
states.append(TutorialState())
states.append(StartState())

pyglet.clock.schedule_interval(update, 1.0/60.0)
window.clear()
window.flip()
window.set_visible(True)
pyglet.app.run()
The problem here is that it only loads the starting batch/scene. Whenever I press 'SPACE' to go to the tutorial scene, the labels/sprites of the starting batch disappear, but it doesn't draw batch1 or load the tutorial class/scene. Any suggestions?
After creating a batch for each scene class:
import pyglet
from pyglet.window import key

class SceneTemplate(object):
    """A template with common things used by every scene."""
    def __init__(self, text):
        self.batch = pyglet.graphics.Batch()
        self.label = pyglet.text.Label(
            text,
            font_name='Arial', font_size=32,
            color=(200, 255, 255, 255), x=32, y=704,
            batch=self.batch)
        # (...)

class MainMenuScene(SceneTemplate):
    def __init__(self):
        super(MainMenuScene, self).__init__(text='MainMenuScene')
        # (...)

class IntroScene(SceneTemplate):
    def __init__(self):
        super(IntroScene, self).__init__(text='Introduction')
        # (...)

class Level1(SceneTemplate):
    def __init__(self):
        super(Level1, self).__init__(text='Level 1')
        # (...)
You can control the state/scene in another class, such as a window class (personally I like to subclass the pyglet window, to keep things organized, among other reasons):
class Window(pyglet.window.Window):
    def __init__(self):
        super(Window, self).__init__(width=1024, height=768)
        self.states = [MainMenuScene(), IntroScene(), Level1()]  # and so on...
        self.current_state = 0  # later you change it to get the scene you want
        self.set_visible()

    def on_draw(self):
        self.clear()
        self.states[self.current_state].batch.draw()

    def on_key_press(self, symbol, modifiers):
        if symbol == key.SPACE:
            new_state = self.current_state + 1
            new_state = new_state % len(self.states)
            self.current_state = new_state
        # If you want each scene to handle input, you could use pyglet's
        # push_handlers(), or even something like:
        # self.states[self.current_state].on_key_press(symbol, modifiers)
        # Giving them access to the window instance might be needed.

if __name__ == '__main__':
    window = Window()
    pyglet.app.run()
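
If each scene also needs per-frame logic, a small sketch (assuming every scene class defines an update(dt) method) that forwards a scheduled clock callback to the active scene, registered before pyglet.app.run():

def update(dt):
    # Forward the tick to whichever scene is currently active.
    window.states[window.current_state].update(dt)

pyglet.clock.schedule_interval(update, 1.0 / 60.0)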

Avoiding duplicate tasks in celery broker

I want to create the following flow using celery configuration/API:
send TaskA(argB) only if the celery queue has no TaskA(argB) already pending.
Is it possible? How?
You can make your job aware of other tasks by some sort of memoization. If you use a cache control key (redis, memcached, /tmp, whatever is handy), you can make execution depend on that key. I'm using redis as an example.
from redis import Redis

@app.task
def run_only_one_instance(params):
    try:
        sentinel = Redis().incr("run_only_one_instance_sentinel")
        if sentinel == 1:
            # I am the legitimate running task
            perform_task()
        else:
            # Do you want to do something else on task duplicate?
            pass
        Redis().decr("run_only_one_instance_sentinel")
    except Exception as e:
        Redis().decr("run_only_one_instance_sentinel")
        # potentially log error with Sentry?
        # decrement the counter to ensure tasks can run
        # or: raise e
I cannot think of a way other than to:
1. Retrieve all executing and scheduled tasks via celery inspect
2. Iterate through them to see if your task is there.
Check this SO question to see how the first point is done.
Good luck.
I don't know whether it will help you more than the other answers, but here is my approach, following the same idea given by srj. I needed a way to block my server from launching tasks with the same id to the queue, so I made a general function to help me.
def is_task_active_or_registered(app, task_id):
    i = app.control.inspect()
    active_dict = i.active()
    scheduled_dict = i.scheduled()
    keys_set = set(active_dict.keys()) | set(scheduled_dict.keys())
    tasks_ids_set = set()
    for _dict in [active_dict, scheduled_dict]:
        for k in keys_set:
            for task in _dict.get(k, []):
                tasks_ids_set.add(task['id'])
    if task_id in tasks_ids_set:
        return True
    else:
        return False
So, I use it like this:
In the context where my celery-app object is available, I define:
def check_task_can_not_run(task_id):
    return is_task_active_or_registered(app=celery, task_id=task_id)
And so, from my client request, I call this check_task_can_not_run(...) and block the task from being launched if it returns True.
I was facing a similar problem: beat was creating duplicates in my queue. I wanted to use expires, but this feature isn't working properly (https://github.com/celery/celery/issues/4300).
So here is a scheduler which checks whether a task has already been enqueued (based on the task name).
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals

import json
from heapq import heappop, heappush

from celery.beat import event_t
from celery.schedules import schedstate
from django_celery_beat.schedulers import DatabaseScheduler
from typing import List, Optional
from typing import TYPE_CHECKING

from your_project import celery_app

if TYPE_CHECKING:
    from celery.beat import ScheduleEntry


def is_task_in_queue(task, queue_name=None):
    # type: (str, Optional[str]) -> bool
    queues = [queue_name] if queue_name else celery_app.amqp.queues.keys()
    for queue in queues:
        if task in get_celery_queue_tasks(queue):
            return True
    return False


def get_celery_queue_tasks(queue_name):
    # type: (str) -> List[str]
    with celery_app.pool.acquire(block=True) as conn:
        tasks = conn.default_channel.client.lrange(queue_name, 0, -1)

    decoded_tasks = []
    for task in tasks:
        j = json.loads(task)
        task = j['headers']['task']
        if task not in decoded_tasks:
            decoded_tasks.append(task)
    return decoded_tasks


class SmartScheduler(DatabaseScheduler):
    """
    Smart means that it prevents duplicating tasks in queues.
    """
    def is_due(self, entry):
        # type: (ScheduleEntry) -> schedstate
        is_due, next_time_to_run = entry.is_due()

        if (
            not is_due or  # a duplicate wouldn't be created anyway
            not is_task_in_queue(entry.task)  # not in the queue, so let it run
        ):
            return schedstate(is_due, next_time_to_run)

        # The task should be run (is_due) but it is already present in the queue
        # (is_task_in_queue), so postpone it instead of firing now.
        H = self._heap
        if not H:
            return schedstate(False, self.max_interval)

        event = H[0]
        verify = heappop(H)
        if verify is event:
            next_entry = self.reserve(entry)
            heappush(H, event_t(self._when(next_entry, next_time_to_run), event[1], next_entry))
        else:
            heappush(H, verify)
            next_time_to_run = min(verify[0], next_time_to_run)

        return schedstate(False, min(next_time_to_run, self.max_interval))
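
To put the scheduler into use, beat must be pointed at the class. A sketch, assuming SmartScheduler lives in your_project/schedulers.py:

# Either on the command line:
#   celery -A your_project beat --scheduler your_project.schedulers.SmartScheduler
# or in the celery configuration:
celery_app.conf.beat_scheduler = 'your_project.schedulers.SmartScheduler'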