How to save each for-loop output into a separate file, not a single file? - python-3.7

I want to save each output of the for loop into a different text file, not a single text file. For example, the first loop's output would go into Device1_Output01.txt, the second loop's output into Device2_Output02.txt, then Device3_Output03.txt, etc. Please help me, I'm a beginner. I appreciate your help in advance. Thank you.
import paramiko
import time
import sys

c = open("Command_List.txt", "r")
command_list = c.read().split("\n")  # Create a list from the Command_List file
d = open("Device_List.txt", "r")
nodes = d.read().split("\n")  # Create a list from the Device_List file
port = 22
username = "user"
password = "password"

for ip in nodes:  # Loop over each ip in the device list
    print("Login to:", ip)
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(ip, port, username, password)
    comm = ssh.invoke_shell()
    for command in command_list:  # Loop over each command in command_list
        comm.send(' %s \n' % command)
        time.sleep(.5)
        output = comm.recv(9999)
        output = output.decode('ascii').split(',')  # Convert string to list without any change
        restorepoint = sys.stdout
        sys.stdout = open('HWOutput.txt', "a")  # All output is appended here. How do I save each loop's output under a different filename?
        print(''.join(output))
        sys.stdout = restorepoint
    ssh.close()

Just replace sys.stdout with an actual open file at the start of the loop, then close the file and revert to the initial stdout at the end.
for ip in nodes:
    if not ip.strip():
        continue
    with open(ip + ".ssh-log.txt", "wt") as sshlog:
        sys.stdout = sshlog
        print("Login to:", ip)
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(ip, port, username, password)
        comm = ssh.invoke_shell()
        for command in command_list:
            comm.send(' %s \n' % command)
            time.sleep(.5)
            output = comm.recv(9999)
            output = output.decode('ascii').split(',')
            print(''.join(output))
    sys.stdout = sys.__stdout__
    ssh.close()
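
A variant that avoids redirecting sys.stdout at all, as a minimal sketch: write to the file handle directly and build the numbered filenames the question asks for with enumerate (the DeviceN_OutputNN.txt pattern is an assumption based on the question's examples):

# Sketch: write each device's output straight to its own file,
# without redirecting sys.stdout. The DeviceN_OutputNN.txt naming
# is assumed from the examples given in the question.
for i, ip in enumerate(nodes, start=1):
    if not ip.strip():
        continue
    with open("Device{0}_Output{1:02d}.txt".format(i, i), "w") as logfile:
        logfile.write("Login to: {}\n".format(ip))
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(ip, port, username, password)
        comm = ssh.invoke_shell()
        for command in command_list:
            comm.send(' %s \n' % command)
            time.sleep(.5)
            logfile.write(comm.recv(9999).decode('ascii'))
        ssh.close()

Writing to the handle directly also avoids the risk of leaving sys.stdout pointing at a closed file if an exception occurs mid-loop.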

Related

Container keeps on crashing while creating a deployment from a docker image in minikube

I have a Docker image containing Python files which should download satellite imagery from the SciHub website. The Docker image is working fine. Now, when I want to create the deployment through kubectl so that I can expose it as a service, its container keeps crashing. That's what the pod description says when viewed through kubectl describe pod.
This is how I am trying to deploy it: sudo kubectl run back --image=back:latest --port=8080 --image-pull-policy Never. I also tried changing the port, but it did not work. Here are the files within the Docker image.
Dockerfile
FROM python:3.7-stretch
COPY . /code
WORKDIR /code
RUN pip install -r requirements.txt
ENTRYPOINT ["python", "ingestion.py"]
ingestion.py
import os
import shutil
import logging

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(name)s - %(message)s')
logger = logging.getLogger("ingestion")

import requests
import datahub

scihub_username = os.environ["scihub_username"]
scihub_password = os.environ["scihub_password"]
result_url = "http://" + os.environ["CDINRW_BASE_URL"] + "/jobs/" + os.environ["CDINRW_JOB_ID"] + "/results"

logger.info("Searching the Copernicus Open Access Hub")
scenes = datahub.search(username=scihub_username,
                        password=scihub_password,
                        producttype=os.getenv("producttype"),
                        platformname=os.getenv("platformname"),
                        days_back=os.getenv("days_back", 2),
                        footprint=os.getenv("footprint"),
                        max_cloud_cover_percentage=os.getenv("max_cloud_cover_percentage"),
                        start_date=os.getenv("start_date"),
                        end_date=os.getenv("end_date"))
logger.info("Found {} relevant scenes".format(len(scenes)))

job_results = []
for scene in scenes:
    # do not download a scene that has already been ingested
    if os.path.exists(os.path.join("/out_data", scene["title"] + ".SAFE")):
        logger.info("The scene {} already exists in /out_data and will not be downloaded again.".format(scene["title"]))
        filename = scene["title"] + ".SAFE"
    else:
        logger.info("Starting the download of scene {}".format(scene["title"]))
        filename = datahub.download(scene, "/tmp", scihub_username, scihub_password, unpack=True)
        logger.info("The download was successful.")
        shutil.move(filename, "/out_data")
    result_message = {"description": "test",
                      "type": "Raster",
                      "format": "SAFE",
                      "filename": os.path.basename(filename)}
    job_results.append(result_message)

res = requests.put(result_url, json=job_results, timeout=60)
res.raise_for_status()
datahub.py
import logging
import os
import urllib.parse
import zipfile
import requests
# constructing URLs for querying the data hub
_BASE_URL = "https://scihub.copernicus.eu/dhus/"
SITE = {}
SITE["SEARCH"] = _BASE_URL + "search?format=xml&sortedby=beginposition&order=desc&rows=100&start={offset}&q="
_PRODUCT_URL = _BASE_URL + "odata/v1/Products('{uuid}')/"
SITE["CHECKSUM"] = _PRODUCT_URL + "Checksum/Value/$value"
SITE["SAFEZIP"] = _PRODUCT_URL + "$value"
logger = logging.getLogger(__name__)
def _build_search_url(producttype=None, platformname=None, days_back=2, footprint=None, max_cloud_cover_percentage=None, start_date=None, end_date=None):
    search_terms = []
    if producttype:
        search_terms.append("producttype:{}".format(producttype))
    if platformname:
        search_terms.append("platformname:{}".format(platformname))
    if start_date and end_date:
        search_terms.append(
            "beginPosition:[{}+TO+{}]".format(start_date, end_date))
    elif days_back:
        search_terms.append(
            "beginPosition:[NOW-{}DAYS+TO+NOW]".format(days_back))
    if footprint:
        search_terms.append("footprint:%22Intersects({})%22".format(
            footprint.replace(" ", "+")))
    if max_cloud_cover_percentage:
        search_terms.append("cloudcoverpercentage:[0+TO+{}]".format(max_cloud_cover_percentage))
    url = SITE["SEARCH"] + "+AND+".join(search_terms)
    return url
def _unpack(zip_file, directory, remove_after=False):
    with zipfile.ZipFile(zip_file) as zf:
        # This assumes that the zipfile only contains the .SAFE directory at root level
        safe_path = zf.namelist()[0]
        zf.extractall(path=directory)
    if remove_after:
        os.remove(zip_file)
    return os.path.normpath(os.path.join(directory, safe_path))
def search(username, password, producttype=None, platformname=None, days_back=2, footprint=None, max_cloud_cover_percentage=None, start_date=None, end_date=None):
    """ Search the Copernicus SciHub

    Parameters
    ----------
    username : str
        user name for the Copernicus SciHub
    password : str
        password for the Copernicus SciHub
    producttype : str, optional
        product type to filter for in the query (see https://scihub.copernicus.eu/userguide/FullTextSearch#Search_Keywords for allowed values)
    platformname : str, optional
        platform name to filter for in the query (see https://scihub.copernicus.eu/userguide/FullTextSearch#Search_Keywords for allowed values)
    days_back : int, optional
        number of days before today that will be searched. Defaults to the last 2 days. If start and end date are set, the days_back parameter is ignored
    footprint : str, optional
        well-known-text representation of the footprint
    max_cloud_cover_percentage : str, optional
        percentage of cloud cover per scene. Can only be used in combination with Sentinel-2 imagery.
        (see https://scihub.copernicus.eu/userguide/FullTextSearch#Search_Keywords for allowed values)
    start_date : str, optional
        start point of the search extent; has to be used in combination with end_date
    end_date : str, optional
        end point of the search extent; has to be used in combination with start_date

    Returns
    -------
    list
        a list of scenes that match the search parameters
    """
    import xml.etree.cElementTree as ET
    scenes = []
    search_url = _build_search_url(producttype, platformname, days_back, footprint, max_cloud_cover_percentage, start_date, end_date)
    logger.info("Search URL: {}".format(search_url))
    offset = 0
    rowsBreak = 5000
    name_space = {"atom": "http://www.w3.org/2005/Atom",
                  "opensearch": "http://a9.com/-/spec/opensearch/1.1/"}
    while offset < rowsBreak:  # Next pagination page:
        response = requests.get(search_url.format(offset=offset), auth=(username, password))
        root = ET.fromstring(response.content)
        if offset == 0:
            rowsBreak = int(
                root.find("opensearch:totalResults", name_space).text)
        for e in root.iterfind("atom:entry", name_space):
            uuid = e.find("atom:id", name_space).text
            title = e.find("atom:title", name_space).text
            begin_position = e.find(
                "atom:date[@name='beginposition']", name_space).text
            end_position = e.find(
                "atom:date[@name='endposition']", name_space).text
            footprint = e.find("atom:str[@name='footprint']", name_space).text
            scenes.append({
                "id": uuid,
                "title": title,
                "begin_position": begin_position,
                "end_position": end_position,
                "footprint": footprint})
        # Ultimate DHuS pagination page size limit (rows per page).
        offset += 100
    return scenes
def download(scene, directory, username, password, unpack=True):
    """ Download a Sentinel scene based on its uuid

    Parameters
    ----------
    scene : dict
        the scene to be downloaded
    directory : str
        the path where the file will be downloaded to
    username : str
        username for the Copernicus SciHub
    password : str
        password for the Copernicus SciHub
    unpack : boolean, optional
        flag that defines whether the downloaded product should be unpacked after download. Defaults to True

    Raises
    ------
    ValueError
        if the size of the downloaded file does not match the Content-Length header
    ValueError
        if the checksum of the downloaded file does not match the checksum provided by the Copernicus SciHub

    Returns
    -------
    str
        path to the downloaded file
    """
    import hashlib
    md5hash = hashlib.md5()
    md5sum = requests.get(SITE["CHECKSUM"].format(
        uuid=scene["id"]), auth=(username, password)).text
    download_path = os.path.join(directory, scene["title"] + ".zip")
    # overwrite if path already exists
    if os.path.exists(download_path):
        os.remove(download_path)
    url = SITE["SAFEZIP"].format(uuid=scene["id"])
    rsp = requests.get(url, auth=(username, password), stream=True)
    cl = rsp.headers.get("Content-Length")
    size = int(cl) if cl else -1
    # Actually fetch now:
    with open(download_path, "wb") as f:  # Do not read as a whole into memory:
        written = 0
        for block in rsp.iter_content(8192):
            f.write(block)
            written += len(block)
            md5hash.update(block)
    written = os.path.getsize(download_path)
    if size > -1 and written != size:
        raise ValueError("{}: size mismatch, {} bytes written but expected {} bytes to write!".format(
            download_path, written, size))
    elif md5sum:
        calculated = md5hash.hexdigest()
        expected = md5sum.lower()
        # (The snippet in the question breaks off here; the docstring says a
        # checksum mismatch raises ValueError, so presumably:)
        if calculated != expected:
            raise ValueError("{}: checksum mismatch, calculated {} but expected {}!".format(
                download_path, calculated, expected))
    if unpack:
        return _unpack(download_path, directory, remove_after=True)
    return download_path
Pod events:
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning BackOff 2m39s (x18636 over 2d19h) kubelet, minikube Back-off restarting failed container
The system which wants to use this service already has another main front-end service running (which just runs the application) on 8081, so maybe I need to expose this one on the same port. How can I make the deployment run?

get_process_lines in liquidsoap 1.3.0

I've just updated Liquidsoap to 1.3.0 and now get_process_lines does not return anything.
def get_request() =
  # Get the URI
  lines = get_process_lines("curl http://localhost:3000/api/v1/liquidsoap/next/my-radio")
  log("liquidsoap curl returns #{lines}")
  uri = list.hd(default="",lines)
  log("liquidsoap will try and play #{uri}")
  # Create a request
  request.create(uri)
end
I read in the CHANGELOG:
- Moved get_process_lines and get_process_output to utils.liq, added optional env parameter
Does it mean I have to do something to use utils.liq in my script now?
The full script is as follows:
set("log.file",false)
set("log.stdout",true)
set("log.level",3)
def apply_metadata(m) =
title = m["title"]
artist = m["artist"]
log("Now playing: #{title} by #{artist}")
end
# Our custom request function
def get_request() =
# Get the URI
lines = get_process_lines("curl http://localhost:3000/api/v1/liquidsoap/next/my-radio")
log("liquidsoap curl returns #{lines}")
uri = list.hd(default="",lines)
log("liquidsoap will try and play #{uri}")
# Create a request
request.create(uri)
end
def my_safe(s) =
security = sine()
fallback(track_sensitive=false,[s,security])
end
s = request.dynamic(id="s",get_request)
s = on_metadata(apply_metadata,s)
s = crossfade(s)
s = my_safe(s)
# We output the stream to an icecast
# server, in ogg/vorbis format.
log("liquidsoap starting")
output.icecast(
%mp3(id3v2=true,bitrate=128,samplerate=44100),
host = "localhost",
port = 8000,
password = "PASSWORD",
mount = "myradio",
genre="various",
url="http://www.myradio.fr",
description="My Radio",
s
)
Of course, the API is working:
$ curl http://localhost:3000/api/v1/liquidsoap/next/my-radio
annotate:title="Chamakay",artist="Blood Orange",album="Cupid Deluxe":http://localhost/stream/3.mp3
A simpler example:
lines = get_process_lines("echo hi")
log("lines = #{lines}")
line = list.hd(default="",lines)
log("line = #{line}")
returns the following logs:
2017/05/05 15:24:42 [lang:3] lines = []
2017/05/05 15:24:42 [lang:3] line =
Many thanks in advance for your help!
geoffroy
The issue was fixed in Liquidsoap 1.3.1:
Fixed:
Fixed run_process, get_process_lines, get_process_output when compiling with OCaml <= 4.03 (#437, #439)
https://github.com/savonet/liquidsoap/blob/1.3.1/CHANGES#L12

get AT command call response

I'm trying code that makes a voice call using a USB modem, and it succeeded in making a call. Now I want to read the call response so I know whether the number is ringing, busy, or unavailable.
This is the code I used:
string number = textBox1.Text;
po.PortName = "COM3";
po.BaudRate = int.Parse("9600");
po.DataBits = Convert.ToInt32("8");
po.Parity = Parity.None;
po.StopBits = StopBits.One;
po.ReadTimeout = int.Parse("300");
po.WriteTimeout = int.Parse("300");
po.Encoding = Encoding.GetEncoding("iso-8859-1");
po.Open();
po.DtrEnable = true;
po.RtsEnable = true;
po.Write("ATDT "+number+";\r");
System.Threading.Thread.Sleep(7000);
po.WriteLine("ATH+CHUP;\r");
po.DiscardInBuffer();
po.DiscardOutBuffer();
po.Close();
After ATD, you need to read the port for a kind of information called URC (unsolicited result code).
For a voice call, the following responses are possible.
If there is no dial tone:
NO DIALTONE
If busy:
BUSY
If the connection cannot be set up:
NO CARRIER
NO ANSWER
And before ATD, you'd better set the error format using at+cmee; for example, at+cmee=2 will enable the string format.
EDIT: (Here is an example with Python.)
#! /usr/bin/env python
# -*- coding: utf8 -*-
from __future__ import print_function
import sys
import serial

NUM = "111111111"
ser = serial.Serial("com1", 115200)

ser.write('at+cmee=2\r')
ser.timeout = 10.0
res = "invalid"
while len(res) > 0:
    res = ser.read(1)
    print(res, end='')

ser.write('atd' + NUM + ';\r')
ser.timeout = 60.0
res = "invalid"
while len(res) > 0:
    res = ser.read(1)
    print(res, end='')

ser.write("AT+CHUP\r")
ser.timeout = 10.0
res = "invalid"
while len(res) > 0:
    res = ser.read(1)
    print(res, end='')
Its output is as follows (I rejected the call from the phone "111111111"):
at+cmee=2
OK
atd111111111;
OK
NO CARRIER
AT+CHUP
+CME ERROR: operation not allowed
And after the output of NO CARRIER, there is no further need to hang up.

An error in my code for a simple FTP

I get an error when running the code at the bottom. It's like a simple FTP.
I use Python 2.6.6 and CentOS release 6.8.
On most Linux servers it gets the right results, like this (I'm very sorry, I have just signed up and couldn't ...):
Client:
[root@Test ftp]# python client.py
path:put|/home/aaa.txt
Server:
[root@Test ftp]# python server.py
connected...
pre_data:put|aaa.txt|4
cmd: put
file_name: aaa.txt
file_size: 4
upload successed.
But I get errors on some servers (such as my own VM on my PC). I have done lots of tests (Python 2.6/2.7, CentOS 6.5/6.7) and found the error is not caused by them. Here is the error information:
[root@Lewis-VM ftp]# python server.py
connected...
pre_data:put|aaa.txt|7sdfsdf  ### Here we get the wrong result: "sdfsdf" is the content of /home/aaa.txt; it shouldn't be sent here as part of file_size, and that is what causes the ValueError below
cmd: put
file_name: aaa.txt
file_size: 7sdfsdf
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 10699)
Traceback (most recent call last):
File "/usr/lib64/python2.6/SocketServer.py", line 570, in process_request_thread
self.finish_request(request, client_address)
File "/usr/lib64/python2.6/SocketServer.py", line 332, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/usr/lib64/python2.6/SocketServer.py", line 627, in __init__
self.handle()
File "server.py", line 30, in handle
if int(file_size)>recv_size:
ValueError: invalid literal for int() with base 10: '7sdfsdf\n'
What's more, I found that if I insert a time.sleep(1) between sk.send(cmd+"|"+file_name+'|'+str(file_size)) and sk.send(data) in client.py, the error disappears. As I said, I did tests on different systems and Python versions, and the error is not caused by them, so I guess it comes from some system config? I checked socket.send() and socket.recv() on python.org but found nothing helpful. Could somebody help me explain why this happens?
The code is here:
#!/usr/bin/env python
#coding:utf-8
################
#This is server#
################
import SocketServer
import os

class MyServer(SocketServer.BaseRequestHandler):
    def handle(self):
        base_path = '/home/ftp/file'
        conn = self.request
        print 'connected...'
        while True:
            ##### receive pre_data: we should get data like 'put|/home/aaa|7'
            pre_data = conn.recv(1024)
            print 'pre_data:' + pre_data
            cmd, file_name, file_size = pre_data.split('|')
            print 'cmd: ' + cmd
            print 'file_name: ' + file_name
            print 'file_size: ' + file_size
            recv_size = 0
            file_dir = os.path.join(base_path, file_name)
            f = file(file_dir, 'wb')
            Flag = True
            #### receive 1024 bytes each time
            while Flag:
                if int(file_size) > recv_size:
                    data = conn.recv(1024)
                    recv_size += len(data)
                else:
                    recv_size = 0
                    Flag = False
                    continue
                f.write(data)
            print 'upload successed.'
            f.close()

instance = SocketServer.ThreadingTCPServer(('127.0.0.1', 9999), MyServer)
instance.serve_forever()
#!/usr/bin/env python
#coding:utf-8
################
#This is client#
################
import socket
import sys
import os

ip_port = ('127.0.0.1', 9999)
sk = socket.socket()
sk.connect(ip_port)

while True:
    input = raw_input('path:')
    ##### we should input like 'put|/home/aaa.txt'
    cmd, path = input.split('|')
    file_name = os.path.basename(path)
    file_size = os.stat(path).st_size
    sk.send(cmd + "|" + file_name + '|' + str(file_size))
    send_size = 0
    f = file(path, 'rb')
    Flag = True
    ##### read 1024 bytes and send it to server each time
    while Flag:
        if send_size + 1024 > file_size:
            data = f.read(file_size - send_size)
            Flag = False
        else:
            data = f.read(1024)
            send_size += 1024
        sk.send(data)
    f.close()

sk.close()
TCP is a stream of data; that is the problem. TCP does not preserve message boundaries. So when a client calls something like
connection.send("0123456789")
connection.send("ABCDEFGHIJ")
then a naive server like
while True:
    data = conn.recv(1024)
    print data + "_"
may print any of:
0123456789_ABCDEFGHIJ_
0123456789ABCDEFGHIJ_
0_1_2_3_4_5_6_7_8_9_A_B_C_D_E_F_G_H_I_J_
The server has no chance to recognize how many sends the client made, because the TCP stack on the client side just inserted the data into a stream, and the server must be able to process the received data even when it arrives in a different number of buffers than the client used.
Your server must contain logic to separate the header and the data. All application protocols based on TCP use some mechanism to identify application-level boundaries. For example, HTTP separates headers and body with an empty line and announces the body length in a separate header.
Your program works correctly when the server receives the header with the command, name and size in a separate buffer; it fails when the client is fast enough to push the data into the stream quickly and the server reads the header and the data in one chunk. A minimal sketch of such framing follows.
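The sketch prefixes every message with its length so the receiver knows exactly where it ends (the helper names send_msg, recv_exactly and recv_msg are illustrative, not part of the original code):

import struct

def send_msg(sock, payload):
    # Prefix the payload with its length as a 4-byte big-endian integer.
    sock.sendall(struct.pack('!I', len(payload)) + payload)

def recv_exactly(sock, n):
    # Keep calling recv() until exactly n bytes have arrived.
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise EOFError('connection closed mid-message')
        buf += chunk
    return buf

def recv_msg(sock):
    # Read the 4-byte length prefix, then exactly that many payload bytes.
    (length,) = struct.unpack('!I', recv_exactly(sock, 4))
    return recv_exactly(sock, length)

With this framing the client would call send_msg(sk, cmd+"|"+file_name+"|"+str(file_size)) before streaming the file bytes, and the server would replace its first conn.recv(1024) with recv_msg(conn); the header can then never bleed into the file contents, no matter how quickly the client sends.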

Send Multiple commands using Netmiko

I am struggling to send multiple commands to multiple hosts. I am reading the commands from a file:
commands.txt
sh ip int bri
sh run
sh ver
hosts.txt
router 1
router 2
router 3
I then run the following:
from __future__ import print_function
from netmiko import ConnectHandler  ## For SSH
import re
import getpass

while True:  # create loop for whole program
    username = input("Enter Username")
    jumphostpassword = getpass.getpass("Enter Jumphost Password")
    elif (op == 2):
        TACACSpassword = getpass.getpass("Enter TACACS Password")
    elif (in1 == "c"):
        commandsfile = input("Please Enter CommandsFile path as c:/example/ \n :")
        hostsfile = input("Please Enter Hosts path as c:/example/ \n :")
        # hosts = open((hostsfile) , "r")
        hosts = [hosts for hosts in (hosts.strip() for hosts in open(hostsfile)) if hosts]
        for host1 in hosts:
            with open(host1 + ".txt", "w") as file:
                commands1 = open((commandsfile), "r+")
                jumphost = {'device_type': 'linux', 'ip': '172.27.200.26', 'username': (username), 'password': (jumphostpassword)}
                net_connect = ConnectHandler(**jumphost)
                output = net_connect.send_command("ssh " + str(host1))
                print(output)
                else:
                    output = net_connect.send_command(TACACSpassword)
                    print(output)
                output = net_connect.send_command("term leng 0")
                print(output)
                cmd1 = [cmd1 for cmd1 in (cmd1.strip() for cmd1 in open(commandsfile)) if cmd1]
                for cmd1 in commands1:
                    print("File saved in c:\saad\saad.txt ")
                    output += net_connect.send_config_set(cmd1)
                    print(output)
                net_connect.disconnect
                print("File saved in c:\saad\saad.txt ")
                file.write(output)
                file.close()
    continue
Place your IPs in an ips.csv file in the following format...
Host
192.168.1.1
192.168.1.2
Then use the following code; usage: python code.py -c ips.csv
#!/usr/bin/python
import getpass
import re
import csv
import paramiko
import netmiko
from argparse import ArgumentParser
from netmiko import ConnectHandler

if __name__ == '__main__':
    parser = ArgumentParser(description='Arguments:')
    parser.add_argument('-c', '--csv', required=True, action='store',
                        help='Location of CSV file of IPs')
    args = parser.parse_args()
    ssh_username = 'yoursshusername'
    ssh_password = 'yoursshpassword'
    with open(args.csv, 'r') as file:
        reader = csv.DictReader(file)
        for device_row in reader:
            try:
                ssh_session = ConnectHandler(device_type='cisco_ios',
                                             ip=device_row['Host'],
                                             username=ssh_username, password=ssh_password)
                print '********* {0} *********'.format(device_row['Host'])
                # Specify your commands here; you can add more commands, just follow the same syntax
                print ssh_session.send_command('show running-config | i hostname')
            # Specify exceptions here
            except paramiko.AuthenticationException:
                print('{0}'.format(device_row['Host']), "Authentication Problem!")
                pass
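
The loop above sends only one command per host. To cover the "multiple commands" part of the question, here is a sketch of what could replace the single send_command call inside the try block (the commands.txt layout comes from the question; the per-host output filename pattern is an assumption):

# Sketch: run every command from commands.txt on the current device
# and save all output to one file per host (filename pattern assumed).
with open('commands.txt') as cmd_file:
    commands = [line.strip() for line in cmd_file if line.strip()]

with open(device_row['Host'] + '_output.txt', 'w') as log:
    for command in commands:
        output = ssh_session.send_command(command)
        log.write('--- {0} ---\n{1}\n'.format(command, output))
ssh_session.disconnect()

send_command is the right Netmiko call for show commands like those in commands.txt; send_config_set (used in the question's code) enters configuration mode first, which show commands do not need.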