Micropython webserver stops working when add code to read data from dht11 - webserver

I've downloaded webserver code from https://docs.micropython.org/en/v1.8/esp8266/esp8266/tutorial/network_tcp.html, it worked well. But after adding code reading dht11 values, webserver stops responding. What's wrong with my code?
import machine
import dht
import socket
import network
sta_if = network.WLAN(network.STA_IF)
sta_if.connect(SSID, PASS)
addr = socket.getaddrinfo('0.0.0.0', 80)[0][-1]
d = machine.Pin(5, machine.Pin.IN, machine.Pin.PULL_UP)
def measure():
d.measure()
temp = d.temperature()
hum = d.humidity()
return temp, hum
s = socket.socket()
s.bind(addr)
s.listen(1)
print('listening on', addr)
while True:
cl, addr = s.accept()
print('client connected from', addr)
cl_file = cl.makefile('rwb', 0)
while True:
line = cl_file.readline()
if not line or line == b'\r\n':
break
response = measure()
cl.send(response)
cl.close()

I see two problems with your code:
First, to read the DHT11 sensor you need to use a DHT object. Try replacing
d = machine.Pin(5, machine.Pin.IN, machine.Pin.PULL_UP)
with
d = dht.DHT11(machine.Pin(5))
Second, the output of your measure() function is a numeric tuple and you're passing that directly to cl.send(), but that method needs a bytes object. You need to encode the two values into a string then convert that into bytes first. Instead of
cl.send(response)
you probably want something like
message = 'Temperature {} Humidity {}'.format(response[0], response[1])
cl.send(bytes(message, 'utf-8'))

Related

uPY uart not communicating correctly with EG25-G

I had a motor controller connected to GP0 and GP1 so I know they work, however I cant seem to get a response from the SIM controller. Without the pico attached to the board, I can get it to work, but when I add the pico it seems like it wont send AT commands or translate received data if the pico is getting any at all. I have tried to run the code line by line in a live session and all I get is a single number that is equal to the number of letters inside the string that I am sending to the sim controller. ie uart.write(bytearray(b'ATE1\r\n')) would return >>> 6 6 being the number of characters in the code after b. I'm ordering a new pico to see if just maybe it was my sub par soldering, but in the mean time I could see if anyone more experienced than I can point out a error.
import machine
import os
import utime
import time
import binascii
from machine import UART
pwr_enable = 22 # EG25_4G Power key connected on GP22
uart_port = 0
uart_baud = 115200
# Initialize UART0
uart = machine.UART(uart_port, uart_baud)
print(os.uname())
def wait_resp_info(timeout=3000):
prvmills = utime.ticks_ms()
info = b""
while (utime.ticks_ms()-prvmills) < timeout:
if uart.any():
info = b"".join([info, uart.read(1)])
print(info.decode())
return info
def Check_and_start(): # Initialize SIM Module
while True:
uart.write(bytearray(b'ATE1\r\n'))
utime.sleep(10)
uart.write(bytearray(b'AT\r\n'))
rec_temp = wait_resp_info()
print(wait_resp_info())
print(rec_temp)
print(rec_temp.decode())
utime.sleep(10)
if 'OK' in rec_temp.decode():
print('OK response from AT command\r\n' + rec_temp.decode())
break
else:
power = machine.Pin(pwr_enable, machine.Pin.OUT)
power.value(1)
utime.sleep(2)
power.value(0)
print('No response, restarting\r\n')
utime.sleep(10)
def Network_check():# Network connectivity check
for i in range(1, 3):
if Send_command("AT+CGREG?", "0,1") == 1:
print('Connected\r\n')
break
else:
print('Device is NOT connected\r\n')
utime.sleep(2)
continue
def Str_to_hex_str(string):
str_bin = string.encode('utf-8')
return binascii.hexlify(str_bin).decode('utf-8')
Check_and_start()
Network_check()
Response is
>>> Check_and_start()
b''
b'\x00\x00'
No response, restarting
New Pico fixed my issue, I believe it to be that my inadequate soldering skills created the issue. Symptoms were, no UART data was being transmitted or received through UART pins 0 and 1. Solution was new Pico board was inserted in place of old one, same code was uploaded and ran successfully first time.

TTN (The Things Network) counter and Pycom Lopy4 problem

I have a problem on the counter in TTN. this problem happened after I added machine.deepsleep function in my LoPy4. I have used the nvram_save () and nvram_restore () functions, but the counter in TTN still not increment.
this screenCapture :
So, what's the problem in this case ?
this my code :
enter code here
import ads1x15 as extADC
import gc
import pycom
import socket
from network import LoRa
from uModBus.serial import Serial
from network import LoRa
import machine
import ubinascii
import utime
from machine import RTC, I2C
from machine import Pin
pycom.heartbeat(False)
rtc = RTC()
i2c = I2C(0, I2C.MASTER, pins=('P3', 'P4'), baudrate=100000)
adc = extADC.ADS1115(i2c, gain=1)
pinTrig = Pin('P23', mode=Pin.OUT)
# LoRa Socket Connection with two condition
if machine.reset_cause() == machine.DEEPSLEEP_RESET:
pinTrig.value(1) # enable High (12v)
print("WOKE UP FROM DEEPSLEEP 3 MINUTES !")
utime.sleep(1)
lora = LoRa(mode=LoRa.LORAWAN, region=LoRa.AS923)
lora.nvram_restore() # Nvram restore function
s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)
s.setsockopt(socket.SOL_LORA, socket.SO_DR, 5)
s.setblocking(False)
s.bind(1)
else:
pinTrig.value(1)
utime.sleep(1)
print("I'M PowerOn by Humans or Hard reset !")
lora = LoRa(mode=LoRa.LORAWAN, region=LoRa.AS923)
lora.nvram_restore() # Nvram restore function
app_eui = ubinascii.unhexlify('********************')
app_key = ubinascii.unhexlify('-----------------------------')
lora.join(activation=LoRa.OTAA, auth=(app_eui, app_key), timeout=0)
while not lora.has_joined():
utime.sleep(2.5)
print('Not yet joined...')
print('Joined')
s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)
s.setsockopt(socket.SOL_LORA, socket.SO_DR, 5)
s.setblocking(True)
s.bind(1)
### Begin sensor reading and sending function ##################
def read_data_Sensor () :
### End sensor reading and sending function ####################
try:
read_data_Sensor()
lora.nvram_save()
utime.sleep(1)
pinTrig.value(0)
print("DeepSleep Mode")
utime.sleep(1)
machine.deepsleep(180000)
except OSError:
print("Terjadi Error - Restart")
s.send(b"\xff\xff")
utime.sleep(1)
machine.reset()
My understanding of the nvmram_save/restore methods is that they restore the full state of the lora stack, including the 'joined/not joined' status.
If you explicitly do the join() every time, then this both wastes energy/time in the join exchange, and this process will always reset the counters back to 0.
I think your code should test lora.has_joined() after the nvram_restore(), and only do the join procedure if this returns False.
btw I have also experienced issues with pycom and nvmram_save/restore with TTN v3.

pyaudio save multiple .WAV file with nonblocking

Updates:
Now I found out that we can paste some code in the callback function and ended up more questions:
When will be call and stop the callback functions? when we open and close the stream?
The callback function can return the stream data(audio_data from the code). As we did not call the function, the pyaudio do it internally I believe. How do I get the return stream data from callback?
import pyaudio
import wave
import numpy as np
import npstreams
import time
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
audio = pyaudio.PyAudio()
channel_1_frames = []
channel_2_frames = []
fulldata = np.array([])
def callback(in_data, frame_count, time_info, flag):
global b,a,fulldata #global variables for filter coefficients and array
audio_data = np.fromstring(in_data, dtype=np.int16)
channel_1 = audio_data[0::CHANNELS]
channel_2 = audio_data[1::CHANNELS]
data1 = channel_1.tostring()
data2 = channel_2.tostring()
channel_1_frames.append(data1)
channel_2_frames.append(data2)
wf1 = wave.open('Channel_1.wav', 'wb')
wf2 = wave.open('Channel_2.wav', 'wb')
wf1.setnchannels(1)
wf2.setnchannels(1)
wf1.setsampwidth(audio.get_sample_size(FORMAT))
wf2.setsampwidth(audio.get_sample_size(FORMAT))
wf1.setframerate(RATE)
wf2.setframerate(RATE)
wf1.writeframes(b''.join(channel_1_frames))
wf2.writeframes(b''.join(channel_2_frames))
wf1.close()
wf2.close()
return (audio_data, pyaudio.paContinue)
stream = audio.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
#frames_per_buffer=CHUNK,
stream_callback=callback)
stream.start_stream()
while stream.is_active():
time.sleep(10)
stream.stop_stream()
stream.close()
audio.terminate()
=============================================
I am trying to record multiple channels into multiple .WAV file.
I can do that with stream.read() and numpy array to separate into different array, and save to .WAV file
stream = audio.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
print("* recording")
channel_1_frames = []
channel_2_frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
# convert string to numpy array
data_array = np.frombuffer(data, dtype='int16')
# select channel
channel_1 = data_array[0::CHANNELS]
channel_2 = data_array[1::CHANNELS]
# convert numpy array to string
data1 = channel_1.tostring()
data2 = channel_2.tostring()
channel_1_frames.append(data1)
channel_2_frames.append(data2)
stream.stop_stream()
stream.close()
audio.terminate()
However, from the module documentation, https://people.csail.mit.edu/hubert/pyaudio/docs/#class-stream, it said stream.read() and stream.write() should not be used for non-blocking.
And I found a good non-blocking pyaudio example from Github: https://gist.github.com/sloria/5693955
Which did not use stream.read().
I am not sure if I can read and turn steam numpy array without stream.read()
So is it still possible to export the stream in to different .WAV? and make it non blocking?
Thanks
As I learn more in coding, I found the answers.
A1: The callback function run and stop with the stream.
######open stream with out starting automatically
audio = pyaudio.PyAudio()
stream = audio.open(format=format,
channels=2,
rate=44100,
input=True,
frames_per_buffer=44100,
stream_callback=self.get_callback(),
start=False)
######start,stop stream
stream.start_stream()
stream.close()
audio.terminate()
A2: To capture data in real time, we can use queue
self.recorded_frames = queue.Queue()
def get_callback(self):
def callback(in_data, frame_count, time_info, status):
self.recorded_frames.put(np.frombuffer(in_data, dtype=np.int16))
return in_data, pyaudio.paContinue
return callback

Movesense decode SBEM data from LogBook

I'm trying to get the LogBook data over BLE to my App.
This works fine for JSON, the data seems accurate.
But it takes along time due to the JSON encoding.
Getting the SBEM data is way faster. But I can't find any documentation on the encoding. I found out that the "Content" string is Base64 encoded.
It starts with SBEM which means, it is uncompressed as stated here:
https://bitbucket.org/suunto/movesense-device-lib/src/5bcf0b40644a17d48977cf011ebcf6191650c6f0/MovesenseCoreLib/resources/movesense-api/mem/logbook.yaml?fileviewer=file-view-default#lines-186
But I couldn't find anything else.
Has somebody further information on that or found out what the encoding is like?
Best regards
Alex
First some clarification: When requesting the JSON log from MDS/Logbook/ service the data itself is transferred from Movesense sensor in SBEM format and the conversion is performed on the phone. If you have specific examples where the said conversion is slow (there very well might be) it's a good idea to add a bitbucket issue to movesense-mobile-lib.
About the SBEM format. This is "Suunto Oy internal" binary format for presenting xml (and nowadays json) files. This means that the interpretation of it may change when the format evolves. With that warning aside, here's the format:
Data is encoded in chunks with ID(1-2 bytes), length(1-4 bytes) and content
consists of two separate sections: Descriptors & Data which can be in separate "files" (like in Logbook service)
Descriptors describe the format of the data in data chunks ("format string")
Data chunks contain the binary data in described format.
If you want to learn about the SBEM format that the DataLogger / Logbook services use, see the "generated/sbem-code" folder that is created during the build.
And finally, here is a simple python code for parsing SBEM format:
from __future__ import print_function
import sys
import re
import glob, os
data_path = sys.argv[0]
descriptor_path = sys.argv[1]
ReservedSbemId_e_Escape = b"\255"
ReservedSbemId_e_Descriptor = 0
#print("data_path:",data_path)
print("descriptor_path:",descriptor_path)
# reads sbem ID upto uint16 from file
def readId(f):
byte1 = f.read(1)
id = None
if not byte1:
print("EOF found")
elif byte1 < ReservedSbemId_e_Escape:
id = int.from_bytes(byte1, byteorder='little')
#print("one byte id:", id)
else:
# read 2 following bytes
id_bytes = f.read(2)
id = int.from_bytes(id_bytes, byteorder='little')
#print("two byte id:",id)
return id
# reads sbem length upto uint32 from file
def readLen(f):
byte1 = f.read(1)
if byte1 < ReservedSbemId_e_Escape:
datasize = int.from_bytes(byte1, byteorder='little')
#print("one byte len:", len)
else:
# read 4 following bytes
id_bytes = f.read(4)
datasize = int.from_bytes(id_bytes, byteorder='little')
#print("4 byte len:",len)
return datasize
# read sbem chunkheader from file
def readChunkHeader(f):
id = readId(f)
if id is None:
return (None,None)
datasize = readLen(f)
ret = (id, datasize)
print("SBEM chunk header:", ret)
print(" offset:", f.tell())
return ret
def readHeader(f):
# read header
header_bytes = f.read(8)
print("SBEM Header: ", header_bytes)
def parseDescriptorChunk(data_bytes):
print("parseDescriptorChunk data:", chunk_bytes)
return
def parseDataChunk(data_bytes):
print("parseDataChunk data:", chunk_bytes)
return
# read descriptors
with open(descriptor_path, 'rb') as f_desc:
readHeader(f_desc)
while True:
(id, datasize) = readChunkHeader(f_desc)
if id is None:
# print("None id:",id)
break;
chunk_bytes = f_desc.read(datasize)
if (len(chunk_bytes) != datasize):
print("ERROR: too few bytes returned.")
break
if id == ReservedSbemId_e_Descriptor:
parseDescriptorChunk(chunk_bytes)
else:
print("WARNING: data chunk in descriptor file!")
parseDataChunk(chunk_bytes)
# read data
with open(data_path, 'rb') as f_data:
readHeader(f_data)
while True:
(id, datasize) = readChunkHeader(f_data)
if id is None:
# print("None id:",id)
break;
chunk_bytes = f_data.read(datasize)
if (len(chunk_bytes) != datasize):
print("ERROR: too few bytes returned.")
break
if id == ReservedSbemId_e_Descriptor:
parseDescriptorChunk(chunk_bytes)
else:
parseDataChunk(chunk_bytes)
Full Disclaimer: I work for the Movesense team

Broadcast sendto failed

I am trying to broadcast data but the output is udp send failed. I chose a random port 33333. What's wrong with my code?
int main()
{
struct sockaddr_in udpaddr = { sin_family : AF_INET };
int xudpsock_fd,sock,len = 0,ret = 0,optVal = 0;
char buffer[255];
char szSocket[64];
memset(buffer,0x00,sizeof(buffer));
memset(&udpaddr,0,sizeof(udpaddr));
udpaddr.sin_addr.s_addr = INADDR_BROADCAST;
udpaddr.sin_port = htons(33333);
xudpsock_fd = socket(PF_INET,SOCK_DGRAM,IPPROTO_UDP);
optVal = 1;
ret = setsockopt(xudpsock_fd,SOL_SOCKET,SO_BROADCAST,(char*)&optVal,sizeof(optVal));
strcpy(buffer,"this is a test msg");
len = sizeof(buffer);
ret = sendto(xudpsock_fd,buffer,len,0,(struct sockaddr*)&udpaddr,sizeof(udpaddr));
if (ret == -1)
printf("udp send failed\n");
else
printf("udp send succeed\n");
return (0);
}
One problem is that the address family you are trying to send to is zero (AF_UNSPEC). Although you initialize the family to AF_INET at the top of the function, you later zero it out with memset.
On the system I tested with, the send actually works anyway for some strange reason despite the invalid address family, but you should definitely try fixing that first.
You probably had a problem with your default route (eg, you didn't have one). sendto needs to pick an interface to send the packet on, but the destination address was probably outside the Destination/Genmask for each defined interface (see the 'route' command-line tool).
The default route catches this type of packet and sends it through an interface despite this mismatch.
Setting the destination to 127.255.255.255 will usually cause the packet to be sent through the loopback interface (127.0.0.1), meaning it will be able to be read by applications that (in this case) are run on the local machine.