How to build a new kind of syntax for an object in python - python-3.7

How can I build a new kind of syntax that, when called, runs its value through os.system?
while True:
    a = input('enter your direction: ')
    if a != '':
        !dir !+Shell(a)
    else:
        print('the dir can not be None')

Based on your clarification, maybe this would work for you:
import subprocess

while True:
    cmd = input("Enter a command: ")
    if cmd != "":
        cmd = cmd.split("!")[1]
        subprocess.call(cmd, shell=True)
    else:
        print("Input cannot be None")


Python subprocess.call() not working with >> operator

I'm trying to write a program that automatically sets up Python shell scripts for me. Here's the code:
#!/usr/bin/env python3
import click
import subprocess

@click.command()
@click.argument('name')
def foo(name):
    subprocess.call("cd ~/bin", shell=True)
    subprocess.call(["touch", name])
    subprocess.call(["echo", "'#!/usr/bin/env python3'", ">>", name])
    subprocess.call(["chmod", "+x", name])

if __name__ == '__main__':
    foo()
When it runs, this is the output I get:
'#!/usr/bin/env python3' >> foo
A better way to approach this problem is to use the built-in Python libraries for creating files, so that you can catch exceptions more easily. I've made a simple outline for you, but you should add some error checking.
import click
import os
import stat

@click.command()
@click.argument('name')
def foo(name):
    file_to_create = os.path.expanduser("~/bin/") + name
    with open(file_to_create, 'w') as file:
        file.write("#!/usr/bin/env python3\n")
    file_stats = os.stat(file_to_create)
    os.chmod(file_to_create, file_stats.st_mode | stat.S_IEXEC)  # equivalent of 'chmod +x'

if __name__ == '__main__':
    foo()
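The outline deliberately leaves out the error checking. One possible sketch of that part (the specific exception handling below is an assumption, not part of the original answer):
import os
import stat

def create_script(name):
    file_to_create = os.path.join(os.path.expanduser("~/bin"), name)
    try:
        with open(file_to_create, 'w') as file:
            file.write("#!/usr/bin/env python3\n")
        # Add the execute bit, the rough equivalent of 'chmod +x'.
        file_stats = os.stat(file_to_create)
        os.chmod(file_to_create, file_stats.st_mode | stat.S_IEXEC)
    except OSError as exc:
        # e.g. ~/bin does not exist or is not writable
        print("Could not create {}: {}".format(file_to_create, exc))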

Backward search in ipython via FZF

The question is: how can I use fzf for backward search in the IPython shell?
pip install pyfzf
Put the following lines into an IPython startup file, i.e. any file located in ~/.ipython/profile_default/startup/.
Press Ctrl-R in IPython, and you are good to go.
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.keys import Keys
from prompt_toolkit.filters import HasFocus, HasSelection

try:
    from pyfzf import pyfzf
except ImportError:
    print("pyfzf is not installed. Please install to enable fzf Ctrl-R search")
    exit()

ipython = get_ipython()
fzf = pyfzf.FzfPrompt()

def is_in_empty_line(buf):
    text = buf.text
    cursor_position = buf.cursor_position
    text = text.split('\n')
    for line in text:
        if len(line) >= cursor_position:
            return not line
        else:
            cursor_position -= len(line) + 1

def fzf_i_search(event):
    history_set = set()
    history_strings = [i[2] for i in ipython.history_manager.get_tail(5000)][::-1]
    history_strings = [s for s in history_strings if not (s in history_set or history_set.add(s))]
    # refresh prompt
    print("", end="\r", flush=True)
    try:
        text = fzf.prompt(history_strings, fzf_options='--no-sort --multi --reverse')
    except:
        return
    buf = event.current_buffer
    if not is_in_empty_line(buf):
        buf.insert_line_below()
    buf.insert_text('\n'.join(text))

# Register the shortcut if IPython is using prompt_toolkit
if getattr(ipython, 'pt_app', None):
    registry = ipython.pt_app.key_bindings
    registry.add_binding(Keys.ControlR,
                         filter=(HasFocus(DEFAULT_BUFFER)
                                 & ~HasSelection()))(fzf_i_search)

del DEFAULT_BUFFER, Keys, HasFocus, HasSelection
del fzf_i_search

Send Multiple commands using Netmiko

I am struggling to send multiple commands to multiple hosts; I am reading the commands from a file:
commands.txt
sh ip int bri
sh run
sh ver
hosts.txt
router 1
router 2
router 3
I then run the following:
from __future__ import print_function
from netmiko import ConnectHandler  ##For SSH
import re
import getpass

while True:  #create loop for whole program
    username = input("Enter Username")
    jumphostpassword = getpass.getpass("Enter Jumphost Password")
    elif (op == 2):
        TACACSpassword = getpass.getpass("Enter TACACS Password")
    elif (in1 == "c"):
        commandsfile = input("Please Enter CommandsFile path as c:/example/ \n :")
        hostsfile = input("Please Enter Hosts path as c:/example/ \n :")
        # hosts = open((hostsfile), "r")
        hosts = [hosts for hosts in (hosts.strip() for hosts in open(hostsfile)) if hosts]
        for host1 in hosts:
            with open(host1 + ".txt", "w") as file:
                commands1 = open((commandsfile), "r+")
                jumphost = {'device_type': 'linux', 'ip': '172.27.200.26', 'username': (username), 'password': (jumphostpassword)}
                net_connect = ConnectHandler(**jumphost)
                output = net_connect.send_command("ssh " + str(host1))
                print(output)
                else:
                    output = net_connect.send_command(TACACSpassword)
                    print(output)
                output = net_connect.send_command("term leng 0")
                print(output)
                cmd1 = [cmd1 for cmd1 in (cmd1.strip() for cmd1 in open(commandsfile)) if cmd1]
                for cmd1 in commands1:
                    print("File saved in c:\saad\saad.txt ")
                    output += net_connect.send_config_set(cmd1)
                    print(output)
                net_connect.disconnect
                print("File saved in c:\saad\saad.txt ")
                file.write(output)
                file.close()
            continue
Place your IPs in an ips.csv file in the following format:
Host
192.168.1.1
192.168.1.2
Then use the following code; usage: python code.py -c ips.csv
#!/usr/bin/python
import getpass
import re
import csv
import paramiko
import netmiko
from argparse import ArgumentParser
from netmiko import ConnectHandler

if __name__ == '__main__':
    parser = ArgumentParser(description='Arguments:')
    parser.add_argument('-c', '--csv', required=True, action='store',
                        help='Location of CSV file of IPs')
    args = parser.parse_args()
    ssh_username = 'yoursshusername'
    ssh_password = 'yoursshpassword'
    with open(args.csv, 'r') as file:
        reader = csv.DictReader(file)
        for device_row in reader:
            try:
                ssh_session = ConnectHandler(device_type='cisco_ios',
                                             ip=device_row['Host'],
                                             username=ssh_username, password=ssh_password)
                print('********* {0} *********'.format(device_row['Host']))
                # Specify your commands here; you can add more commands following the same syntax
                print(ssh_session.send_command('show running-config | i hostname'))
            # Specify exceptions here
            except paramiko.AuthenticationException:
                print('{0}'.format(device_row['Host']), "Authentication Problem!")
                pass
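The question is about sending several commands read from a file, while the answer only shows a single send_command call. A minimal sketch of that missing piece, reusing the answer's ssh_session (the commands.txt filename comes from the question; the loop itself is an assumption):
# Read non-empty command lines from commands.txt (filename from the question).
with open('commands.txt') as f:
    commands = [line.strip() for line in f if line.strip()]

# Send each command over the already-established session and collect the output.
output = ''
for command in commands:
    output += ssh_session.send_command(command) + '\n'
print(output)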

environment variables using subprocess.check_output Python

I'm trying to do some basic module setups on my server using Python. It's a bit difficult as I have no access to the internet.
This is my code:
import sys
import os
from subprocess import CalledProcessError, STDOUT, check_output

def run_in_path(command, dir_path, env_var=''):
    env_var = os.environ["PATH"] = os.environ["PATH"] + env_var
    print(env_var)
    try:
        p = check_output(command, cwd=dir_path, stderr=STDOUT)
    except CalledProcessError as e:
        sys.stderr.write(e.output.decode("utf-8"))
        sys.stderr.flush()
        return e.returncode
    else:
        return 0

def main():
    requests_install = run_in_path('python setup.py build',
                                   'D:\installed_software\python modules\kennethreitz-requests-e95e173')
    SQL_install = run_in_path('python setup.py install',  # install SQL module pypyodbc
                              'D:\installed_software\python modules\pypyodbc-1.3.3\pypyodbc-1.3.3')
    setup_tools = run_in_path('python setup.py install',  # install setup tools
                              'D:\installed_software\python modules\setuptools-17.1.1')
    psycopg2 = run_in_path('easy_install psycopg2-2.6.1.win-amd64-py3.3-pg9.4.4-release',  # install setup tools
                           'D:\installed_software\python modules', ';C:\srv_apps\Python33\Scripts\easy_install.exe')
    print('setup complete')

if __name__ == "__main__":
    sys.exit(main())
Now it gets tricky when I start trying to use easy_install. It appears my environment variables are not being used by my subprocess.check_output call:
File "C:\srv_apps\Python33\lib\subprocess.py", line 1110, in _execute_child
raise WindowsError(*e.args)
FileNotFoundError: [WinError 2] The system cannot find the file specified
I don't want to have to upgrade to 3.4, where easy_install is installed by default, because my other modules are not supported on 3.4. My main challenge is that the subprocess.check_call method does not take environment variables as an input, and I'm wary of trying to use Popen() as I have never really got it to work successfully in the past. Any help would be greatly appreciated.
PATH should contain directories, e.g. r'C:\Python33\Scripts', not files such as r'C:\Python33\Scripts\easy_install.exe'.
Don't hardcode utf-8 for an arbitrary command; you could enable text mode using the universal_newlines parameter (not tested):
#!/usr/bin/env python3
import locale
import sys
from subprocess import CalledProcessError, STDOUT, check_output

def run(command, *, cwd=None, env=None):
    try:
        ignored = check_output(command, cwd=cwd, env=env,
                               stderr=STDOUT,
                               universal_newlines=True)
    except CalledProcessError as e:
        sys.stderr.write(e.output)
        sys.stderr.flush()
        return e.returncode
    else:
        return 0
Example:
import os

# os.pathsep.join() expects a single iterable of path strings
path_var = os.pathsep.join([os.environ.get('PATH', os.defpath), some_dir])
env = dict(os.environ, PATH=path_var)
run("some_command", cwd=some_path, env=env)
run("another_command", cwd=another_path, env=env)

Celery task subprocess stdout to log

I have a Celery task which calls another Python script, external to the Django application, with subprocess. This program has some prints in it, and I want to have those prints in my Celery log file or in my database. When I set CELERY_ALWAYS_EAGER = True in the Django settings.py file, everything works fine. If I don't set this setting, the Celery task logs the subprocess stdout only when it exits. It seems like p.stdout.readline() is blocking.
run-test.py is a long process, a couple of minutes, but it prints what it's doing. I want to capture this.
@shared_task
def run_tests(scenario_path, vu):
    basedir = os.path.abspath(os.path.dirname(__file__))
    config_path = '%s/../../scripts/config.ini' % basedir
    cmd = ['python', '%s/../../scripts/aws/run-test.py' % basedir, '%s' % config_path, scenario_path, str(vu), str(2)]
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    while True:
        line = p.stdout.readline()
        if line != '':
            logger.info(line)
        else:
            return
I found this to be very useful: it uses select for polling instead of blocking on readline.
https://gist.github.com/bgreenlee/1402841
child = subprocess.Popen(popenargs, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, **kwargs)

log_level = {child.stdout: stdout_log_level,
             child.stderr: stderr_log_level}

def check_io():
    ready_to_read = select.select([child.stdout, child.stderr], [], [], 1000)[0]
    for io in ready_to_read:
        line = io.readline()
        logger.log(log_level[io], line[:-1])

# keep checking stdout/stderr until the child exits
while child.poll() is None:
    check_io()

check_io()  # check again to catch anything after the process exits
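Put back into the task from the question, a minimal sketch might look like the following (the logger name, log levels, universal_newlines flag, and the one-second select timeout are assumptions, not part of the original answer):
import logging
import select
import subprocess

logger = logging.getLogger(__name__)  # assumed logger; the question's logger setup is not shown

def run_and_log(cmd):
    """Run cmd and stream its stdout/stderr lines into the logger as they appear."""
    child = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, universal_newlines=True)
    log_level = {child.stdout: logging.INFO, child.stderr: logging.ERROR}

    def check_io():
        # Wait up to one second for either pipe to become readable, then log one line from each.
        ready_to_read = select.select([child.stdout, child.stderr], [], [], 1.0)[0]
        for io in ready_to_read:
            line = io.readline()
            if line:
                logger.log(log_level[io], line.rstrip('\n'))

    while child.poll() is None:
        check_io()
    check_io()  # catch anything printed just before the process exited
    return child.returncode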