Flask/MongoDB error on local server using Raspberry Pi 3 - Raspbian OS - MongoDB

I've made a local server using Flask and MongoDB which works great on Windows, but when I moved my code to the Raspberry Pi I got an error and couldn't figure out why it occurs.
The code I'm using:
1) For the Flask server:
from flask import Flask
from flask import jsonify
from flask import request
import pymongo
import time
import datetime
import json

app = Flask(__name__)

client = pymongo.MongoClient("localhost", 27017)
db = client['mqtt-db']
obs_collection = db['mqtt-collection']

@app.route("/obs")
def obs():
    data_str = request.args.get("data")
    print(data_str)
    data = json.loads(data_str)
    print(data)
    data["date"] = datetime.datetime.now()
    obs_collection.save(data)
    return "success"

@app.route("/get_obs")
def get_obs():
    res = []
    for row in obs_collection.find():
        del row['_id']
        res.append(row)
    return jsonify(res)

@app.route("/delete_all")
def delete_all():
    res = obs_collection.delete_many({})
    return jsonify({"deleted": res.deleted_count})

if __name__ == "__main__":
    app.run(host="0.0.0.0", debug=True)
2) Script for inserting messages into the DB, using the MQTT protocol:
import paho.mqtt.client as mqtt
import pymongo
import json
import datetime

topic = "sensor"
host = "10.0.0.6"

client = pymongo.MongoClient("localhost", 27017)
db = client['mqtt-db']
mqtt_collection = db['mqtt-collection']

# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    print("Connected with result code " + str(rc))
    # Subscribing in on_connect() means that if we lose the connection and
    # reconnect then subscriptions will be renewed.
    client.subscribe(topic)

# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    data_str = str(msg.payload)
    data = json.loads(data_str)
    print(data_str)
    print(data)
    data["date"] = datetime.datetime.now()
    mqtt_collection.save(data)
    print(msg.topic + " " + str(msg.payload))

client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect(host, 1883, 60)

# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
client.loop_forever()
The error occurs when I try to retrieve data from the server using the "get_obs" function.
The error is: "ValueError: dictionary update sequence element #0 has length 4; 2 is required"
I appreciate your help.

As @davidism suggested, the solution was to update to the latest version of Flask.
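For reference, that ValueError comes from older Flask versions whose jsonify() only accepts dicts or keyword arguments, so passing a bare list fails. If upgrading Flask is not an option, a minimal workaround sketch (the "observations" key is just an illustrative name, not part of the accepted fix) is to wrap the list in a dict:

@app.route("/get_obs")
def get_obs():
    res = []
    for row in obs_collection.find():
        del row['_id']
        res.append(row)
    # Older Flask's jsonify() cannot serialize a top-level list,
    # so return a dict with the list nested under a key instead.
    return jsonify({"observations": res})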

Related

MicroPython: How to auto reconnect STA to AP if AP gets power cycled?

I am writing Python code on an ESP8266 using MicroPython to configure it as a STA, connect it to an AccessPoint and then send some data over a UDP socket.
I want to implement a scenario where, if the AccessPoint goes down for some reason and comes back after some time, the ESP8266 acting as STA automatically reconnects to the specific AP.
I am not sure if there is functionality to set up a callback on the STA if it misses a few beacons (e.g. up to 5 or 10).
This is the code that I have written so far:
import network
import esp
import gc
import time

try:
    import usocket as socket
except ModuleNotFoundError as e:
    import socket

def main():
    count = 0
    esp.osdebug(None)
    gc.collect()

    sta = network.WLAN(network.STA_IF)
    sta.active(True)
    sta.connect('HumidityServer', 'password#123')
    while not sta.active():
        pass
    print('Connection successful')
    print(sta.ifconfig())

    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(('192.168.45.1', 9000))

    while True:
        str_to_send = "Hello ESP8266: " + str(count)
        s.sendto(str_to_send, ('192.168.45.1', 9000))
        request = s.recv(1024)
        print('Content = %s' % str(request))
        count = count + 1
        time.sleep(5)

if __name__ == "__main__":
    main()
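For what it's worth, a minimal reconnect sketch, assuming polling of the STA interface with isconnected() rather than a beacon-miss callback (which plain MicroPython does not expose); the wait_for_connection() helper name and the 30-second timeout are illustrative, not part of the original code:

def wait_for_connection(sta, ssid, password, timeout=30):
    # Re-issue connect() and block until the STA reports a link or we time out.
    sta.connect(ssid, password)
    deadline = time.time() + timeout
    while not sta.isconnected():
        if time.time() > deadline:
            return False
        time.sleep(1)
    return True

# Inside the send loop: if the AP was power cycled, isconnected() turns False,
# so re-associate before attempting the next sendto(), e.g.:
# if not sta.isconnected():
#     wait_for_connection(sta, 'HumidityServer', 'password#123')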

pymodbus read_input_registers error: 'ModbusIOException' object has no attribute 'registers'

I'm using pymodbus (v3.0.2) on Ubuntu Linux Server 22.04 to read a Modbus RTU device (a Masibus datalogger).
When I try to read the input registers, it sometimes returns the data correctly, but most of the time it gives this error:
"'ModbusIOException' object has no attribute 'registers'"
Here is my code:
#!/usr/bin/python3
import time
import datetime
from datetime import timedelta
import mysql.connector
from mysql.connector import Error
from pymodbus.client import ModbusSerialClient

client = ModbusSerialClient(
    method="rtu",
    port="/dev/ttyUSB0",
    stopbits=2,
    bytesize=8,
    parity="N",
    baudrate=9600)

connection = client.connect()
if connection is True:
    print("Modbus Connection Successful")

def read_mbr():
    readreg = client.read_input_registers(0, 15, unit=1).registers
    # print(f'Data = {readreg}')
    return readreg

while(1):
    try:
        data = read_mbr()
        print(data)
    except Exception as e:
        print(e)
        time.sleep(1)
    time.sleep(1)
I've tried adding more delay time, but it did not work.
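For reference, the message means read_input_registers() returned a ModbusIOException (a failed transaction) instead of a normal response, and that object has no .registers attribute. A defensive sketch, assuming pymodbus's isError() check and otherwise keeping the original call unchanged:

def read_mbr():
    response = client.read_input_registers(0, 15, unit=1)
    # A failed or timed-out transaction returns an error object without
    # .registers, so check before touching the attribute.
    if response.isError():
        print("Modbus read failed: %s" % response)
        return None
    return response.registers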

Python Faust: await agent.ask() doesn't return a reply and hangs the calling function

I am new to Python, playing around with stuff and trying to get Python services communicating via Kafka using Faust.
So I have a small PoC project.
Faust app definition:
# app.py
import faust as f
from models import ReadRequest, ReadResponse

app = f.App("faust-app", broker="kafka://localhost:9092", store="rocksdb://")
topics = {"read-request": app.topic("read-request", value_type=ReadRequest)}

def get_app() -> f.types.AppT:
    return app

def get_topic(name: str) -> f.types.TopicT:
    return topics[name]
My DB reader agent:
# reader.py
import pandas as pd
from pymongo import MongoClient
from app import get_app, get_topic

client = MongoClient()
app = get_app()
req_topic = get_topic("read-request")

@app.agent(req_topic)
async def read_request(requests):
    async for request in requests:
        db = client.test
        coll = db[request["collection"]]
        result = coll.find(request["query"])
        df = pd.DataFrame(result)
        response = {
            "id": request["id"],
            "data": list(df.loc[:, df.columns != "_id"].to_dict(orient="records")),
        }
        print(response)  # debug <1>
        yield response
Model definitions:
# models.py
import faust as f

class ReadRequest(f.Record):
    id: int
    collection: str
    query: dict
Test of agent.ask():
# test.py
import asyncio
from reader import read_request
from models import ReadRequest

async def run():
    result = await read_request.ask(ReadRequest(id=1, collection="test", query={}))
    print(result)  # debug <2>

if __name__ == "__main__":
    asyncio.run(run())
So I have ZooKeeper, the Kafka server, MongoDB and the Faust worker for reader running, everything with out-of-the-box configs.
When I run python3 test.py I see the debug <1> print output as expected, but debug <2> never fires and execution hangs there.
The Faust docs say
So I assume that I am doing everything right.
Does anyone have any clues?
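One hedged guess, not a confirmed fix: the hang may be because test.py calls ask() outside of a Faust worker, so nothing is running to consume the reply. A sketch of issuing the request from inside a running worker instead, using Faust's @app.task hook (the test_task module name is illustrative):

# test_task.py -- start with: faust -A test_task worker -l info
from app import get_app
from models import ReadRequest
from reader import read_request

app = get_app()

@app.task
async def run():
    # Runs once the worker has started, so the reply machinery is available
    # and ask() can receive the agent's yielded response.
    result = await read_request.ask(ReadRequest(id=1, collection="test", query={}))
    print(result)  # debug <2>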

How to create a mongoengine connection with ssh?

I'm trying to create a connection and add a document with mongoengine through an SSH tunnel.
A successful attempt with pymongo can be seen below; I simply want something similar with mongoengine. :-)
from auth import *
import pymongo
from sshtunnel import SSHTunnelForwarder

server = SSHTunnelForwarder(
    (HOST_IP, HOST_PORT),
    ssh_username=SSH_USER,
    ssh_password=SSH_PASS,
    remote_bind_address=('localhost', 27017)
)
server.start()

client = pymongo.MongoClient('127.0.0.1', server.local_bind_port)
db = client[MONGO_DB]
db.authenticate(MONGO_USER, MONGO_PASS)
coll = db.queue_db
coll.insert({"testFile42": 43})

server.stop()
mongoengine.connect(
    db=DB_NAME,
    host="127.0.0.1",
    port=server.local_bind_port
)
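A fuller sketch of the mongoengine variant, assuming the same auth.py constants as the pymongo example and mongoengine's username/password/authentication_source connect parameters (the authentication_source value here just mirrors the db.authenticate() call above):

from auth import *
import mongoengine
from sshtunnel import SSHTunnelForwarder

server = SSHTunnelForwarder(
    (HOST_IP, HOST_PORT),
    ssh_username=SSH_USER,
    ssh_password=SSH_PASS,
    remote_bind_address=('localhost', 27017)
)
server.start()

# Point mongoengine at the local end of the tunnel and authenticate
# against the same database the pymongo example used.
mongoengine.connect(
    db=MONGO_DB,
    host='127.0.0.1',
    port=server.local_bind_port,
    username=MONGO_USER,
    password=MONGO_PASS,
    authentication_source=MONGO_DB
)

# ... use your Documents here ...

server.stop()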

How do I get my asyncio client to call a socket server and wait for a response?

I am working with an asyncio.Protocol server where the purpose is for the client to call the server, but wait until the server has responded and data is returned before stopping the client loop.
Based on the asyncio docs' Echo Client and Server example here: https://docs.python.org/3/library/asyncio-protocol.html#protocol-example-tcp-echo-server-and-client , transport.write(...) returns immediately when called.
Through experience, calling loop.run_until_complete(coroutine) fails with RuntimeError: Event loop is running.
Running asyncio.sleep(n) in the data_received() method of the server doesn't have any effect either.
yield from asyncio.sleep(n) and yield from asyncio.async(asyncio.sleep(n)) in data_received() both hang the server.
My question is, how do I get my client to wait for the server to write a response before giving back control?
My suggestion is to never use the transport/protocol pair directly; asyncio has the Streams API for high-level programming.
Client code can look like:
@asyncio.coroutine
def communicate():
    reader, writer = yield from asyncio.open_connection(HOST, PORT)
    writer.write(b'data')
    yield from writer.drain()
    answer = yield from reader.read()
    # process answer, maybe send new data back to server and wait for answer again
    writer.close()
You don't have to change the client code.
echo-client.py
#!/usr/bin/env python3.4
import asyncio

class EchoClient(asyncio.Protocol):
    message = 'Client Echo'

    def connection_made(self, transport):
        transport.write(self.message.encode())
        print('data sent: {}'.format(self.message))

    def data_received(self, data):
        print('data received: {}'.format(data.decode()))

    def connection_lost(self, exc):
        print('server closed the connection')
        asyncio.get_event_loop().stop()

loop = asyncio.get_event_loop()
coro = loop.create_connection(EchoClient, '127.0.0.1', 8888)
loop.run_until_complete(coro)
loop.run_forever()
loop.close()
The trick is to place your code (including the self.transport methods) into a coroutine and use wait_for(), putting yield from in front of the statements that need their values returned or that take a while to complete:
echo-server.py
#!/usr/bin/env python3.4
import asyncio

class EchoServer(asyncio.Protocol):
    def connection_made(self, transport):
        peername = transport.get_extra_info('peername')
        print('connection from {}'.format(peername))
        self.transport = transport

    def data_received(self, data):
        print('data received: {}'.format(data.decode()))
        fut = asyncio.async(self.sleeper())
        result = asyncio.wait_for(fut, 60)

    @asyncio.coroutine
    def sleeper(self):
        yield from asyncio.sleep(2)
        self.transport.write("Hello World".encode())
        self.transport.close()

loop = asyncio.get_event_loop()
coro = loop.create_server(EchoServer, '127.0.0.1', 8888)
server = loop.run_until_complete(coro)
print('serving on {}'.format(server.sockets[0].getsockname()))

try:
    loop.run_forever()
except KeyboardInterrupt:
    print("exit")
finally:
    server.close()
    loop.close()
Run echo-server.py and then echo-client.py; the client will wait 2 seconds, as determined by asyncio.sleep, and then stop.