Trigger in pymongo - mongodb

My requirement is as follows:
In the code below, whenever records get inserted into the "col" collection, I need to do some manipulation on the "col2" collection. How do I achieve this? Is my code correct? I am trying to see what insert_change prints, but it is not printing anything.
import pymongo
import logging
import Parameters, constants

class Stream:
    def get_connection(self):
        client = pymongo.MongoClient(
            "mongodb://" + constants.USER_NAME + ":" + constants.PWD + constants.server + constants.CA_CERTIFICATES_PATH)
        logging.info("Mongo DB connection established successfully")
        db = client[Parameters.STG_QC_Hub_Files]
        col = db[Parameters.col7]
        col2 = db[Parameters.col8]
        return col, col2, db

    def insert(self, col, col2, db):
        try:
            with db.col.watch([{'$match': {'operationType': 'insert'}}]) as stream:
                print(stream)
                for insert_change in stream:
                    print("hi")
                    print(insert_change)
        except pymongo.errors.PyMongoError:
            # The ChangeStream encountered an unrecoverable error or the
            # resume attempt failed to recreate the cursor.
            logging.error('...')

c = Stream()
col, col2, db = c.get_connection()
c.insert(col, col2, db)

The code below will print whatever you insert into the collection. (Note that your version calls db.col.watch(...), which watches a collection literally named "col"; call watch() on the collection object itself, as done here.)
import pymongo
import logging
import Parameters, constants
from bson.json_util import dumps

class Stream:
    def get_connection(self):
        client = pymongo.MongoClient(
            "mongodb://" + constants.USER_NAME + ":" + constants.PWD + constants.server + constants.CA_CERTIFICATES_PATH)
        logging.info("Mongo DB connection established successfully")
        db = client[Parameters.QC_Quality_Hub]
        col = db[Parameters.col7]
        col2 = db[Parameters.col8]
        return col, col2, db

    def insert(self, col, col2, db):
        try:
            pipeline = [{"$match": {"operationType": "insert"}}]
            # watch() is called on the collection object, not on db.col
            with col.watch(pipeline) as stream:
                for insert_change in stream:
                    print(dumps(insert_change))
        except pymongo.errors.PyMongoError:
            logging.error("Oops")

c = Stream()
col, col2, db = c.get_connection()
c.insert(col, col2, db)
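To satisfy the original requirement (manipulating "col2" whenever a document lands in "col"), you can act on each change event inside the loop. Below is a minimal sketch, assuming the default change-stream behaviour where insert events carry the new record in fullDocument; the "processed" flag and the copy-into-col2 manipulation are purely illustrative:

import logging

import pymongo
from bson.json_util import dumps

def react_to_inserts(col, col2):
    """Watch col and write a derived record to col2 for every insert."""
    pipeline = [{"$match": {"operationType": "insert"}}]
    try:
        with col.watch(pipeline) as stream:
            for change in stream:
                doc = change["fullDocument"]  # the newly inserted record
                doc.pop("_id", None)          # let col2 assign its own _id
                # Illustrative manipulation: copy the document into col2
                # with an extra flag; replace with your real transformation.
                col2.insert_one(dict(doc, processed=True))
                logging.info("Handled insert: %s", dumps(change))
    except pymongo.errors.PyMongoError:
        logging.error("Change stream closed unexpectedly")

Two further points worth checking if nothing prints: the stream only yields events for inserts that happen while it is open, and change streams require a replica set or sharded cluster; against a standalone mongod, watch() fails with an error that the except clause above would silently swallow.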

Related

script runs in jupyter notebook but not vscode

When I run this code in a Jupyter notebook it runs without any error, but when I run it in VS Code I get an error. The script fetches JSON from a URL using a token, flattens it, and exports it to an Excel file.
This is the code:
import json
import requests
import pandas
from copy import deepcopy

url = "url"
headers = {"Cookie": "token"}
response = requests.get(url, headers=headers)
data = response.json()
print(data)
a = data['tickers'][0]['items'][0]['days'][0]['items']

def cross_join(left, right):
    new_rows = []
    for left_row in left:
        for right_row in right:
            temp_row = deepcopy(left_row)
            for key, value in right_row.items():
                temp_row[key] = value
            new_rows.append(deepcopy(temp_row))
    return new_rows

def flatten_list(data):
    for elem in data:
        if isinstance(elem, list):
            yield from flatten_list(elem)
        else:
            yield elem

def json_to_dataframe(data_in):
    def flatten_json(data, prev_heading=''):
        if isinstance(data, dict):
            rows = [{}]
            for key, value in data.items():
                rows = cross_join(rows, flatten_json(value, prev_heading + '.' + key))
        elif isinstance(data, list):
            rows = []
            for i in range(len(data)):
                rows.extend(flatten_list(flatten_json(data[i], prev_heading)))
        else:
            rows = [{prev_heading[1:]: data}]
        return rows
    return pandas.DataFrame(flatten_json(data_in))

if __name__ == '__main__':
    json_data = a
    df = json_to_dataframe(json_data)
    print(df)
    df.to_excel("output.xlsx")
This is the error:
ModuleNotFoundError Traceback (most recent call last)
---> 59 df.to_excel("output.xlsx")
-> 2026 formatter.write(
--> 730 writer = ExcelWriter(stringify_path(writer), engine=engine)
---> 18 from openpyxl.workbook import Workbook
ModuleNotFoundError: No module named 'openpyxl'
What am I missing?
Run this command from a terminal in VS Code:
pip install openpyxl
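Jupyter and VS Code were evidently using different Python environments: openpyxl (the engine pandas uses to write .xlsx files) was installed in the notebook's interpreter but not in the one VS Code runs. A quick diagnostic sketch (standard library only) to confirm which interpreter is active:

import sys

# Shows the interpreter VS Code is actually running; install openpyxl
# into that exact interpreter rather than whichever pip is first on PATH:
#     /path/printed/below -m pip install openpyxl
print(sys.executable)

If the printed path differs from the one Jupyter reports, point VS Code's Python extension at the notebook's interpreter, or install openpyxl into the selected one.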

Parameterize the find method in python using mongo

Files to upload will be named like WFSIV0101202001.318.tar.gz, WFSIV0101202001.2624.tar.gz, etc.
INPUT_FILE_PATH = r'C:\Files to upload'

try:
    import os
    import sys
    import pymongo
    import pymongo.errors
    from google.cloud import storage
    from pymongo import MongoClient
    from pymongo.errors import ConnectionFailure
except ImportError:
    print("missing modules")

try:
    mongo_client = MongoClient(host="xyz.com", port=27017)
    Db = mongo_client['abcd']
    coll = Db['shopper_journey_sitedata']
except ConnectionFailure:
    print("Connection failed")

date = []
# Thirdpartyid=[]
input_files = os.listdir(INPUT_FILE_PATH)
# looping through input files
for input_file in input_files:
    x = input_file.split(".")
    date.append(x[0][5:13])
    tp_site_id = x[1]
    # print(tp_site_id)
    cur = coll.find({"third_party_site_id": tp_site_id})
    for doc in cur:
        print(doc)
Now I want to parameterize the find() method for every id, so that on each iteration I get the documents matching that tp_site_id.
I tried the code above, but it gives the error "Datas: name error".
You can do one thing: query all the ids at once with $in. Note that in PyMongo the operator must be quoted as a string, and since the ids come from splitting filenames they are strings:
coll.find({"third_party_site_id": {"$in": ["318", "2624", "2621", "2622", "102", "078"]}})
If Tid is an array, then you could replace "318" in your query with Tid[i].
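A sketch of how that fits the file loop above (the collection and field names are taken from the question; the helper function name is hypothetical):

import os

def find_docs_for_files(coll, input_file_path):
    """Collect the site id from each filename, then query them all in one go."""
    site_ids = []
    for input_file in os.listdir(input_file_path):
        parts = input_file.split(".")
        # e.g. WFSIV0101202001.318.tar.gz -> "318"
        site_ids.append(parts[1])
    # One parameterized query instead of one find() per iteration
    return coll.find({"third_party_site_id": {"$in": site_ids}})

# usage:
# for doc in find_docs_for_files(coll, INPUT_FILE_PATH):
#     print(doc)

Note that the ids produced by split(".") are strings; if the documents store third_party_site_id as an integer, convert with int(parts[1]) first.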

flask/MongoDB error on local server using raspberry pi3 - raspbian os

I've made a local server using Flask and MongoDB which works great on Windows, but when I moved my code to the Raspberry Pi, I got an error I couldn't figure out.
The code I'm using:
1) for the Flask server:
from flask import Flask
from flask import jsonify
from flask import request
import pymongo
import time
import datetime
import json

app = Flask(__name__)
client = pymongo.MongoClient("localhost", 27017)
db = client['mqtt-db']
obs_collection = db['mqtt-collection']

@app.route("/obs")
def obs():
    data_str = request.args.get("data")
    print(data_str)
    data = json.loads(data_str)
    print(data)
    data["date"] = datetime.datetime.now()
    obs_collection.save(data)
    return "success"

@app.route("/get_obs")
def get_obs():
    res = []
    for row in obs_collection.find():
        del row['_id']
        res.append(row)
    return jsonify(res)

@app.route("/delete_all")
def delete_all():
    res = obs_collection.delete_many({})
    return jsonify({"deleted": res.deleted_count})

if __name__ == "__main__":
    app.run(host="0.0.0.0", debug=True)
2) script for inserting messages into the db, using the MQTT protocol:
import paho.mqtt.client as mqtt
import pymongo
import json
import datetime

topic = "sensor"
host = "10.0.0.6"

mongo_client = pymongo.MongoClient("localhost", 27017)
db = mongo_client['mqtt-db']
mqtt_collection = db['mqtt-collection']

# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    print("Connected with result code " + str(rc))
    # Subscribing in on_connect() means that if we lose the connection and
    # reconnect then subscriptions will be renewed.
    client.subscribe(topic)

# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    data_str = str(msg.payload)
    data = json.loads(data_str)
    print(data_str)
    print(data)
    data["date"] = datetime.datetime.now()
    mqtt_collection.save(data)
    print(msg.topic + " " + str(msg.payload))

client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect(host, 1883, 60)

# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
client.loop_forever()
The error occurs when I try to retrieve data from the server using the "get_obs" function.
The error is: "ValueError: dictionary update sequence element #0 has length 4; 2 is required".
Appreciate your help.
As @davidism suggested, the solution was to update to the latest version of Flask.
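For context (my reading of the failure, not spelled out in the original answer): very old Flask versions implemented jsonify(*args, **kwargs) roughly as dict(*args, **kwargs), so passing a bare list of documents made dict() try to interpret each document as a key/value pair, which raises exactly this ValueError. If upgrading were not an option, wrapping the list in a dict would sidestep it (the key name "observations" is illustrative):

@app.route("/get_obs")
def get_obs():
    res = []
    for row in obs_collection.find():
        del row['_id']  # ObjectId is not JSON-serializable
        res.append(row)
    # Older Flask cannot jsonify a top-level list; wrap it in a dict.
    return jsonify(observations=res)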

Adding CommandLogger to MongoClient - monitoring

I have a MongoClient from PyMongo. I do not know how to add an event listener to an existing MongoClient. This is what I have:
import logging

from pymongo import MongoClient, monitoring

class CommandLogger(monitoring.CommandListener):
    def started(self, event):
        logging.info("Command {0.command_name} with request id "
                     "{0.request_id} started on server "
                     "{0.connection_id}".format(event))

    def succeeded(self, event):
        logging.info("Command {0.command_name} with request id "
                     "{0.request_id} on server {0.connection_id} "
                     "succeeded in {0.duration_micros} "
                     "microseconds".format(event))

    def failed(self, event):
        logging.error("Command {0.command_name} with request id "
                      "{0.request_id} on server {0.connection_id} "
                      "failed in {0.duration_micros} "
                      "microseconds".format(event))

monitoring.register(CommandLogger())

def get_mongo_client():
    ...
    my_mongo_client = MongoClient(connString)
    # Here, I want to add an event_listener - something like this:
    # my_mongo_client = MongoClient(connString, event_listeners=[CommandLogger()])
    return my_mongo_client
How do I create a mongo_client with a listener and with a connection string (URI)?
Yes, the code you commented out is correct:
my_mongo_client = MongoClient(connString, event_listeners=[CommandLogger()])
Is something not working as expected when you try that?
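A minimal usage sketch, reusing the CommandLogger class from the question (the URI and the ping command are illustrative; note that monitoring.register() applies the listener to every client created afterwards, while event_listeners= scopes it to one client):

import logging

from pymongo import MongoClient

logging.basicConfig(level=logging.INFO)

# Scoped to this client only; no global monitoring.register() needed.
client = MongoClient("mongodb://localhost:27017",
                     event_listeners=[CommandLogger()])

# Any command now fires the listener callbacks:
client.admin.command("ping")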

Backup Odoo 8 Windows: zero-size file error

I made a specific port of the db_backup OpenERP 7 module to run on Odoo version 8.
It is properly installed and it does produce a backup;
the problem is that the SQL file size is zero KB.
This is the code, backup_scheduler.py:
import xmlrpclib
import socket
import os
import time
import base64

from openerp.osv import fields, osv, orm
from openerp import tools, netsvc
from openerp.tools.translate import _

import logging
_logger = logging.getLogger(__name__)

def execute(connector, method, *args):
    res = False
    try:
        res = getattr(connector, method)(*args)
    except socket.error, e:
        raise e
    return res

addons_path = tools.config['addons_path'] + '/auto_backup/DBbackups'

class db_backup(osv.Model):
    _name = 'db.backup'

    def get_db_list(self, cr, user, ids, host='localhost', port='8069', context={}):
        uri = 'http://' + host + ':' + port
        conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
        db_list = execute(conn, 'list')
        return db_list

    _columns = {
        'host': fields.char('Host', size=100, required=True),
        'port': fields.char('Port', size=10, required=True),
        'name': fields.char('Database', size=100, required=True,
                            help='Database you want to schedule backups for'),
        'bkp_dir': fields.char('Backup Directory', size=100,
                               help='Absolute path for storing the backups', required=True),
    }
    _defaults = {
        'bkp_dir': lambda *a: addons_path,
        'host': lambda *a: 'localhost',
        'port': lambda *a: '8069',
    }

    def _check_db_exist(self, cr, user, ids):
        for rec in self.browse(cr, user, ids):
            db_list = self.get_db_list(cr, user, ids, rec.host, rec.port)
            if rec.name in db_list:
                return True
        return False

    _constraints = [
        (_check_db_exist, _('Error ! No such database exists!'), [])
    ]

    def schedule_backup(self, cr, user, context={}):
        conf_ids = self.search(cr, user, [])
        confs = self.browse(cr, user, conf_ids)
        for rec in confs:
            db_list = self.get_db_list(cr, user, [], rec.host, rec.port)
            if rec.name in db_list:
                try:
                    if not os.path.isdir(rec.bkp_dir):
                        os.makedirs(rec.bkp_dir)
                except:
                    raise
                bkp_file = '%s_%s.sql' % (rec.name, time.strftime('%Y%m%d_%H_%M_%S'))
                file_path = os.path.join(rec.bkp_dir, bkp_file)
                fp = open(file_path, 'wb')
                uri = 'http://' + rec.host + ':' + rec.port
                conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
                bkp = ''
                try:
                    bkp = execute(conn, 'dump', tools.config['admin_passwd'], rec.name)
                except:
                    _logger.info("Couldn't backup database %s. Bad database administrator "
                                 "password for server running at http://%s:%s",
                                 rec.name, rec.host, rec.port)
                    continue
                bkp = base64.decodestring(bkp)
                fp.write(bkp)
                fp.close()
            else:
                _logger.info("database %s doesn't exist on http://%s:%s",
                             rec.name, rec.host, rec.port)

db_backup()
This is probably because an error occurs on the backend that does not get propagated back to the client; note also that the code opens the output file before requesting the dump, so a failed dump leaves a zero-byte file behind. If you check the server logs at the time you take a backup you will probably see the issue.
Note: if you need a script to get a backup from, or restore a database to, a v7, v8 or v9 server, take a look at https://github.com/daramousk/odoo_remote_backup
I have developed a script for this specific reason, which you can use or change to resolve your issue.
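A defensive variant of the dump step (a sketch against the same /xmlrpc/db API the module already uses, in the same Python 2 style; it avoids creating the output file until a non-empty dump has arrived, so a backend failure cannot leave a zero-byte file):

import base64
import xmlrpclib

def dump_db(host, port, admin_passwd, db_name, file_path):
    """Dump db_name via the /xmlrpc/db endpoint, writing only on success."""
    conn = xmlrpclib.ServerProxy('http://%s:%s/xmlrpc/db' % (host, port))
    data = conn.dump(admin_passwd, db_name)  # base64-encoded dump
    if not data:
        raise RuntimeError('Server returned an empty dump for %s' % db_name)
    raw = base64.decodestring(data)
    # Only create the file once there is content to write.
    with open(file_path, 'wb') as fp:
        fp.write(raw)
    return len(raw)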