Backup Odoo 8 on Windows: zero-size file error - postgresql

I adapted the db_backup module from OpenERP 7 so that it runs on Odoo 8.
It installs properly and the backup job does run;
the problem is that the resulting SQL file is zero KB in size.
This is the code of backup_scheduler.py:
import xmlrpclib
import socket
import os
import time
import base64

from openerp.osv import fields, osv, orm
from openerp import tools, netsvc
from openerp.tools.translate import _

import logging
_logger = logging.getLogger(__name__)


def execute(connector, method, *args):
    res = False
    try:
        res = getattr(connector, method)(*args)
    except socket.error, e:
        raise e
    return res

addons_path = tools.config['addons_path'] + '/auto_backup/DBbackups'


class db_backup(osv.Model):
    _name = 'db.backup'

    def get_db_list(self, cr, user, ids, host='localhost', port='8069', context={}):
        uri = 'http://' + host + ':' + port
        conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
        db_list = execute(conn, 'list')
        return db_list

    _columns = {
        'host': fields.char('Host', size=100, required=True),
        'port': fields.char('Port', size=10, required=True),
        'name': fields.char('Database', size=100, required=True, help='Database you want to schedule backups for'),
        'bkp_dir': fields.char('Backup Directory', size=100, help='Absolute path for storing the backups', required=True),
    }
    _defaults = {
        'bkp_dir': lambda *a: addons_path,
        'host': lambda *a: 'localhost',
        'port': lambda *a: '8069',
    }

    def _check_db_exist(self, cr, user, ids):
        for rec in self.browse(cr, user, ids):
            db_list = self.get_db_list(cr, user, ids, rec.host, rec.port)
            if rec.name in db_list:
                return True
        return False

    _constraints = [
        (_check_db_exist, _('Error ! No such database exists!'), []),
    ]

    def schedule_backup(self, cr, user, context={}):
        conf_ids = self.search(cr, user, [])
        confs = self.browse(cr, user, conf_ids)
        for rec in confs:
            db_list = self.get_db_list(cr, user, [], rec.host, rec.port)
            if rec.name in db_list:
                try:
                    if not os.path.isdir(rec.bkp_dir):
                        os.makedirs(rec.bkp_dir)
                except:
                    raise
                bkp_file = '%s_%s.sql' % (rec.name, time.strftime('%Y%m%d_%H_%M_%S'))
                file_path = os.path.join(rec.bkp_dir, bkp_file)
                fp = open(file_path, 'wb')
                uri = 'http://' + rec.host + ':' + rec.port
                conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
                bkp = ''
                try:
                    bkp = execute(conn, 'dump', tools.config['admin_passwd'], rec.name)
                except:
                    # use the module-level _logger (netsvc.Logger/notifyChannel is gone in Odoo 8)
                    _logger.info("Couldn't backup database %s. Bad database administrator password for server running at http://%s:%s" % (rec.name, rec.host, rec.port))
                    continue
                bkp = base64.decodestring(bkp)
                fp.write(bkp)
                fp.close()
            else:
                _logger.info("database %s doesn't exist on http://%s:%s" % (rec.name, rec.host, rec.port))

db_backup()

This is probably because an error occurs on the backend that does not get propagated back to the client. If you check the server logs at the time you take a backup you will probably see the issue.
Note: if you need a script to get a backup from, or restore a database to, a v7, v8 or v9 server, take a look at https://github.com/daramousk/odoo_remote_backup.
I have developed a script for this specific reason, which you can use or change to resolve your issue.
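To see whether the empty dump is actually produced on the server side, one option (a sketch only, assuming the server runs on localhost:8069, the master password is 'admin' and the database is called 'mydb' - adjust to your setup) is to call the same XML-RPC dump method the module uses and inspect what comes back:

# Diagnostic sketch: call the db 'dump' method directly and report the payload size,
# so you can tell whether the zero-size file comes from the server or from the module.
import base64
import xmlrpclib

conn = xmlrpclib.ServerProxy('http://localhost:8069/xmlrpc/db')
try:
    dump_b64 = conn.dump('admin', 'mydb')   # same call as execute(conn, 'dump', ...)
    raw = base64.decodestring(dump_b64)
    print('dump size: %d bytes' % len(raw))
except xmlrpclib.Fault as fault:
    # A fault here (wrong master password, pg_dump not found on the server, etc.)
    # is exactly the kind of error that otherwise only shows up in the server log.
    print('server-side error: %s' % fault)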

Related

Is there a way for me to duplicate my pymodbus server so that the copies can run alongside each other without crashing?

I am new to Modbus and pymodbus in general. This is the code I have written and it seems to run fine! For my project I would like to duplicate this code somehow and make it possible for the copies to run alongside each other without crashing.
For the end user it should be as simple as this:
program: "how many servers would you like to run"
user: "3"
program: "what are the address and count of server 1"
user: "...."
program: "what are the address and count of server 2"
user: "...."
program: "what are the address and count of server 3"
user: "...."
and then they all run and it is possible to get updates alongside the program.
Here is the code I have written up until now:
import pymodbus
from pymodbus.version import version
from pymodbus.server.sync import StartSerialServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.datastore import ModbusSequentialDataBlock
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from pymodbus.transaction import ModbusRtuFramer
from pymodbus.constants import Defaults
from pymodbus.client.sync import ModbusSerialClient as modclient

#import logging
#FORMAT = ('%(asctime)-15s %(threadName)-15s' ' %(levelname)-8s %(module)-15s:%(lineno)-8s %(message)s')
#logging.basicConfig(format=FORMAT)
#log = logging.getLogger()
#log.setLevel(logging.DEBUG)

store = ModbusSlaveContext(
    di=ModbusSequentialDataBlock(0, [17] * 100),  # address, value (di = digital inputs)
    co=ModbusSequentialDataBlock(0, [17] * 100),  # same (coils)
    hr=ModbusSequentialDataBlock(0, [17] * 100),  # same (holding registers)
    ir=ModbusSequentialDataBlock(0, [17] * 100))  # same (input registers)

identity = ModbusDeviceIdentification()
identity.VendorName = 'Pymodbus'
identity.ProductCode = 'PM'
identity.VendorUrl = 'http://github.com/riptideio/pymodbus/'
identity.ProductName = 'Pymodbus Server'
identity.ModelName = 'Pymodbus Server'
identity.MajorMinorRevision = version.short()

context = ModbusServerContext(slaves=store, single=True)

client = modclient(method="rtu",
                   port="COM2",
                   timeout=1000,
                   stopbits=1,
                   bytesize=8,
                   parity="N",
                   baudrate=9600)
client.connect()

while True:  # copy and send in
    #input_reg = client.read_holding_registers(address=0, count=0, unit=0)
    #co_reg = client.read_coils(address=, count=, unit=)
    holding_reg = client.read_holding_registers(address=0, count=8, unit=1)  # for address 1 etc.
    print(holding_reg)
    #reg_read = holding_reg.registers[2]  # reads the third value in the array
This is what I get in my terminal when I run it (the error happens if I don't send a value to it):
WriteRegisterResponse 0 => 6
WriteRegisterResponse 0 => 6
WriteRegisterResponse 0 => 6
Modbus Error: [Input/Output] No Response received from the remote unit/Unable to decode response
WriteRegisterResponse 0 => 6
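One possible approach (a sketch, not from the original post): give each server its own slave context and its own serial port, and start each blocking StartSerialServer() call in its own thread. The COM port names and register count below are placeholders that would come from the prompts described above.

# Sketch only: one pymodbus RTU server per serial port, each in its own thread,
# because StartSerialServer() blocks until the server is stopped.
import threading
from pymodbus.server.sync import StartSerialServer
from pymodbus.datastore import (ModbusSequentialDataBlock, ModbusSlaveContext,
                                ModbusServerContext)
from pymodbus.transaction import ModbusRtuFramer

def run_server(port, count):
    store = ModbusSlaveContext(
        hr=ModbusSequentialDataBlock(0, [0] * count))  # holding registers only, for brevity
    context = ModbusServerContext(slaves=store, single=True)
    StartSerialServer(context, framer=ModbusRtuFramer,
                      port=port, baudrate=9600, parity="N", stopbits=1, bytesize=8)

ports = ["COM3", "COM4", "COM5"]  # placeholder: one serial port per server
for p in ports:
    threading.Thread(target=run_server, args=(p, 100)).start()

Each server needs its own physical or virtual serial port; two RTU servers cannot share one COM port, which is usually what makes duplicated copies "crash" into each other.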

How to mock sqlalchemy.engine.cursor.LegacyCursorResult?

I have the following class that makes a connection to an MSSQL Server instance in my project:
"""MSSql Connection class."""
import logging
import logging.config
import sqlalchemy
class MSSQLConnection:
_connection = None
_engine = None
def __init__(self, host, database, username, password):
connection_string = self.build_connection_string(
host, database, username, password)
self._engine = sqlalchemy.create_engine(
connection_string, fast_executemany=True,
isolation_level="READ COMMITTED")
def connect(self):
self._connection = self._engine.connect()
def get_result_tuple(self, table):
logger = logging.getLogger()
metadata = sqlalchemy.MetaData()
db_table = sqlalchemy.Table(
table, metadata, autoload=True, autoload_with=self._engine)
query = sqlalchemy.select([db_table])
transaction_id = self.get_transaction_id()
result_proxy = self._connection.execute(query)
return transaction_id, result_proxy
def get_transaction_id(self):
"""Get the transaction id."""
connection = self._connection
sql_query = sqlalchemy.text("SELECT CURRENT_TRANSACTION_ID()")
result = connection.execute(sql_query)
row = result.fetchone()
return row[0]
def build_connection_string(self, host, database, username, password):
connection_string = ('mssql+pyodbc://' + username + ':'
+ password + '#' + host + '/' + database
+ '?driver=ODBC+Driver+17+for+SQL+Server')
return connection_string
I would like to mock the method
get_result_tuple
that returns `transaction_id` and an instance of sqlalchemy.engine.cursor.LegacyCursorResult.
How do I mock sqlalchemy.engine.cursor.LegacyCursorResult and return some dummy data on the ResultProxy object?
The caller has the following code,
mssql_connection = MSSQLConnection(
host, database, username, password)
mssql_connection.connect()
result_tuple = mssql_connection.get_result_tuple(table)
transaction_id, result_proxy = result_tuple
logger.info(f'Transaction id = {transaction_id}')
current_date = date.today().strftime("%Y_%m_%d")
while True:
partial_results = result_proxy.fetchmany(rows_fetch_limit)
results_count = len(partial_results)
if (partial_results == [] or results_count == 0):
return
else:
// other logic
Please advise.
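One possible approach (a sketch, not an authoritative answer): the caller only needs fetchmany to behave like the real LegacyCursorResult, so you can patch get_result_tuple itself and hand back a MagicMock from the standard library's unittest.mock whose fetchmany returns one batch of dummy rows and then an empty list, which makes the while loop terminate. The module name your_module, the transaction id and the dummy rows below are assumptions for the example.

# Sketch with unittest.mock: no real database is touched. 'your_module' is a placeholder
# for wherever MSSQLConnection lives; create_engine is patched so __init__ doesn't need pyodbc.
from unittest import mock
from your_module import MSSQLConnection

def test_caller_consumes_result_proxy():
    fake_result = mock.MagicMock(name="LegacyCursorResult")
    # First call returns a batch of fake rows, the second returns [] so the caller's loop stops.
    fake_result.fetchmany.side_effect = [[("row1",), ("row2",)], []]

    with mock.patch("sqlalchemy.create_engine"), \
         mock.patch.object(MSSQLConnection, "get_result_tuple",
                           return_value=(12345, fake_result)) as patched:
        conn = MSSQLConnection("host", "db", "user", "pwd")
        transaction_id, result_proxy = conn.get_result_tuple("some_table")

        assert transaction_id == 12345
        assert result_proxy.fetchmany(100) == [("row1",), ("row2",)]
        assert result_proxy.fetchmany(100) == []
        patched.assert_called_once_with("some_table")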

how to store bcrypt hashpw result in db correctly?

I am in the process of creating a login system.
I use Python Flask, with PostgreSQL as the database.
I think I am just storing the hash value wrong.
So far I have saved it as varchar(255).
My code:
from flask import Flask, render_template, redirect, request, url_for, session
from flask_sqlalchemy import SQLAlchemy
import bcrypt
import psycopg2

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:PostGreSQL_13@localhost/test'
sql = SQLAlchemy(app)


@app.route('/')
def home():
    return render_template("home.html")


@app.route('/register', methods=["GET", "POST"])
def register():
    if request.method == "GET":
        return render_template("register.html")
    else:
        name = request.form['name']
        email = request.form['email']
        password = request.form['password'].encode('utf-8')
        hash_password = bcrypt.hashpw(password, bcrypt.gensalt())
        t_host = 'localhost'
        t_port = "5432"
        t_dbname = "test"
        t_user = "postgres"
        t_pw = "password"
        db_conn = psycopg2.connect(host=t_host, port=t_port, dbname=t_dbname, user=t_user, password=t_pw)
        db_cursor = db_conn.cursor()
        db_cursor.execute("INSERT INTO users (UserName,UserEmail,UserPassword) VALUES (%s,%s,%s)", (name, email, hash_password,))
        db_conn.commit()
        session['name'] = name
        session['email'] = email
        return redirect(url_for("home"))


@app.route('/login', methods=["GET", "POST"])
def login():
    if request.method == "POST":
        email = request.form['email']
        password = request.form['password'].encode('utf-8')
        t_host = 'localhost'
        t_port = "5432"
        t_dbname = "test"
        t_user = "postgres"
        t_pw = "password"
        db_conn = psycopg2.connect(host=t_host, port=t_port, dbname=t_dbname, user=t_user, password=t_pw)
        db_cursor = db_conn.cursor()
        db_cursor.execute("SELECT username, useremail, userpassword FROM users WHERE useremail=%s", (email,))
        user = db_cursor.fetchone()
        db_conn.close()
        if len(user) > 0:
            name = user[0]
            if bcrypt.hashpw(password, user[2].encode('utf-8')) == user[2].encode('utf-8'):
                session['name'] = user[0]
                session['email'] = user[1]
                return render_template("home.html")
            else:
                return "Try it again"  # originally in German: "Versuch es doch Nochmal"
    else:
        return render_template("login.html")


@app.route('/logout')
def logout():
    session.clear()
    return render_template("home.html")


if __name__ == '__main__':
    app.secret_key = '012#!ApaAjaBoleh)(*^%'
    app.run(debug=True)
I got the procedure from a YouTube video (see attachment).
I need a login system and this one was recommended to me; it works pretty well.
The last part has to work somehow, and if it does I will be very happy.
Can anyone tell me what I'm doing wrong, or whether the way I connect to the database is right or wrong?
I know nothing about this language you're using, but any bcrypt implementation will output a string similar to:
$2a$12$ieXy2Rj/TEGqVRx0JihGFesujNFCdmlQWpUaTNvwQ0XuB3lzOcTWK
Yes, you should store that varchar string in your database.
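With the Python bcrypt package specifically, a minimal sketch of storing and checking that string could look like the following; the table and column names follow the question, the decode/encode calls and bcrypt.checkpw are the key points, and everything else is illustrative.

# Sketch: store the bcrypt hash as text and verify it with bcrypt.checkpw.
import bcrypt

def store_user(cursor, name, email, plain_password):
    hashed = bcrypt.hashpw(plain_password.encode('utf-8'), bcrypt.gensalt())
    # hashed is bytes like b'$2b$12$...'; decode it so the varchar column stores clean text.
    cursor.execute(
        "INSERT INTO users (UserName, UserEmail, UserPassword) VALUES (%s, %s, %s)",
        (name, email, hashed.decode('utf-8')))

def check_login(cursor, email, plain_password):
    cursor.execute("SELECT userpassword FROM users WHERE useremail = %s", (email,))
    row = cursor.fetchone()
    if row is None:
        return False
    # checkpw re-hashes using the salt embedded in the stored value and compares safely.
    return bcrypt.checkpw(plain_password.encode('utf-8'), row[0].encode('utf-8'))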

How can I export Jira issues to BitBucket

I've just moved my project's code from java.net to BitBucket, but my Jira issue tracking is still hosted on java.net. Although BitBucket does have some options for linking to an external issue tracker, I don't think I can use them for java.net, not least because I do not have the admin privileges needed to install the DVCS connector.
So I thought an alternative option would be to export the issues and then import them into the BitBucket issue tracker. Is that possible?
Progress so far
I tried following the steps in both informative answers below (on OSX), but I hit a problem: I'm rather confused about what the script is actually called, because the answers talk about export.py but no script exists with that name, so I renamed the one I downloaded.
sudo easy_install pip (OSX)
pip install jira
pip install configparser
easy_install -U setuptools
Go to https://bitbucket.org/reece/rcore, select the downloads tab, download the zip and unzip it, and rename the folder to reece (for some reason git clone https://bitbucket.org/reece/rcore fails with an error)
cd reece/rcore
Save script as export.py in rcore subfolder
Replace iteritems with items in export.py
Replace iteritems with items in types/immutabledict.py
Create .config in rcore folder
Create .config/jira-issues-move-to-bitbucket.conf containing
jira-username=paultaylor
jira-hostname=https://java.net/jira/browse/JAUDIOTAGGER
jira-password=password
Run python export.py --jira-project jaudiotagger
gives
macbook:rcore paul$ python export.py --jira-project jaudiotagger
Traceback (most recent call last):
File "export.py", line 24, in <module>
import configparser
ImportError: No module named configparser
I need to run pip install as root, so I did
sudo pip install configparser
and that worked, but now
python export.py --jira-project jaudiotagger
gives
File "export.py", line 35, in <module>
from jira.client import JIRA
ImportError: No module named jira.client
You can import issues into BitBucket; they just need to be in the appropriate format. Fortunately, Reece Hart has already written a Python script to connect to a Jira instance and export the issues.
To get the script to run I had to install the Jira Python package as well as the latest version of rcore (if you use pip you get an incompatible previous version, so you have to get the source). I also had to replace all instances of iteritems with items in the script and in rcore/types/immutabledict.py to make it work with Python 3. You will also need to fill in the dictionaries (priority_map, person_map, etc) with the values your project uses. Finally, you need a config file to exist with the connection info (see comments at the top of the script).
The basic command line usage is export.py --jira-project <project>
Once you've got the data exported, see the instructions for importing issues to BitBucket
#!/usr/bin/env python
"""extract issues from JIRA and export to a bitbucket archive

See:
https://confluence.atlassian.com/pages/viewpage.action?pageId=330796872
https://confluence.atlassian.com/display/BITBUCKET/Mark+up+comments
https://bitbucket.org/tutorials/markdowndemo/overview

2014-04-12 08:26 Reece Hart <reecehart@gmail.com>

Requires a file ~/.config/jira-issues-move-to-bitbucket.conf
with content like
[default]
jira-username=some.user
jira-hostname=somewhere.jira.com
jira-password=ur$pass
"""
import argparse
import collections
import configparser
import glob
import itertools
import json
import logging
import os
import pprint
import re
import sys
import zipfile

from jira.client import JIRA
from rcore.types.immutabledict import ImmutableDict

priority_map = {
    'Critical (P1)': 'critical',
    'Major (P2)': 'major',
    'Minor (P3)': 'minor',
    'Nice (P4)': 'trivial',
}

person_map = {
    'reece.hart': 'reece',
    # etc
}

issuetype_map = {
    'Improvement': 'enhancement',
    'New Feature': 'enhancement',
    'Bug': 'bug',
    'Technical task': 'task',
    'Task': 'task',
}

status_map = {
    'Closed': 'resolved',
    'Duplicate': 'duplicate',
    'In Progress': 'open',
    'Open': 'new',
    'Reopened': 'open',
    'Resolved': 'resolved',
}
def parse_args(argv):
    def sep_and_flatten(l):
        # split comma-sep elements and flatten list
        # e.g., ['a','b','c,d'] -> set('a','b','c','d')
        return list( itertools.chain.from_iterable(e.split(',') for e in l) )

    cf = configparser.ConfigParser()
    cf.readfp(open(os.path.expanduser('~/.config/jira-issues-move-to-bitbucket.conf'),'r'))

    ap = argparse.ArgumentParser(
        description = __doc__
        )
    ap.add_argument(
        '--jira-hostname', '-H',
        default = cf.get('default','jira-hostname',fallback=None),
        help = 'host name of Jira instances (used for url like https://hostname/, e.g., "instancename.jira.com")',
        )
    ap.add_argument(
        '--jira-username', '-u',
        default = cf.get('default','jira-username',fallback=None),
        )
    ap.add_argument(
        '--jira-password', '-p',
        default = cf.get('default','jira-password',fallback=None),
        )
    ap.add_argument(
        '--jira-project', '-j',
        required = True,
        help = 'project key (e.g., JRA)',
        )
    ap.add_argument(
        '--jira-issues', '-i',
        action = 'append',
        default = [],
        help = 'issue id (e.g., JRA-9); multiple and comma-separated okay; default = all in project',
        )
    ap.add_argument(
        '--jira-issues-file', '-I',
        help = 'file containing issue ids (e.g., JRA-9)'
        )
    ap.add_argument(
        '--jira-components', '-c',
        action = 'append',
        default = [],
        help = 'components criterion; multiple and comma-separated okay; default = all in project',
        )
    ap.add_argument(
        '--existing', '-e',
        action = 'store_true',
        default = False,
        help = 'read existing archive (from export) and merge new issues'
        )

    opts = ap.parse_args(argv)

    opts.jira_components = sep_and_flatten(opts.jira_components)
    opts.jira_issues = sep_and_flatten(opts.jira_issues)

    return opts


def link(url,text=None):
    return "[{text}]({url})".format(url=url,text=url if text is None else text)


def reformat_to_markdown(desc):
    def _indent4(mo):
        i = "    "
        return i + mo.group(1).replace("\n",i)

    def _repl_mention(mo):
        return "@" + person_map[mo.group(1)]

    #desc = desc.replace("\r","")
    desc = re.sub("{noformat}(.+?){noformat}",_indent4,desc,flags=re.DOTALL+re.MULTILINE)
    desc = re.sub(opts.jira_project+r"-(\d+)",r"issue #\1",desc)
    desc = re.sub(r"\[~([^]]+)\]",_repl_mention,desc)
    return desc
def fetch_issues(opts,jcl):
    jql = [ 'project = ' + opts.jira_project ]
    if opts.jira_components:
        jql += [ ' OR '.join([ 'component = '+c for c in opts.jira_components ]) ]
    if opts.jira_issues:
        jql += [ ' OR '.join([ 'issue = '+i for i in opts.jira_issues ]) ]
    jql_str = ' AND '.join(["("+q+")" for q in jql])
    logging.info('executing query ' + jql_str)
    return jcl.search_issues(jql_str,maxResults=500)
def jira_issue_to_bb_issue(opts,jcl,ji):
    """convert a jira issue to a dictionary with values appropriate for
    POSTing as a bitbucket issue"""
    logger = logging.getLogger(__name__)

    content = reformat_to_markdown(ji.fields.description) if ji.fields.description else ''

    if ji.fields.assignee is None:
        resp = None
    else:
        resp = person_map[ji.fields.assignee.name]

    reporter = person_map[ji.fields.reporter.name]

    jiw = jcl.watchers(ji.key)
    watchers = [ person_map[u.name] for u in jiw.watchers ] if jiw else []

    milestone = None
    if ji.fields.fixVersions:
        vnames = [ v.name for v in ji.fields.fixVersions ]
        milestone = vnames[0]
        if len(vnames) > 1:
            logger.warn("{ji.key}: bitbucket issues may have only 1 milestone (JIRA fixVersion); using only first ({f}) and ignoring rest ({r})".format(
                ji=ji, f=milestone, r=",".join(vnames[1:])))

    issue_id = extract_issue_number(ji.key)

    bbi = {
        'status': status_map[ji.fields.status.name],
        'priority': priority_map[ji.fields.priority.name],
        'kind': issuetype_map[ji.fields.issuetype.name],
        'content_updated_on': ji.fields.created,
        'voters': [],
        'title': ji.fields.summary,
        'reporter': reporter,
        'component': None,
        'watchers': watchers,
        'content': content,
        'assignee': resp,
        'created_on': ji.fields.created,
        'version': None,  # ?
        'edited_on': None,
        'milestone': milestone,
        'updated_on': ji.fields.updated,
        'id': issue_id,
        }
    return bbi


def jira_comment_to_bb_comment(opts,jcl,jc):
    bbc = {
        'content': reformat_to_markdown(jc.body),
        'created_on': jc.created,
        'id': int(jc.id),
        'updated_on': jc.updated,
        'user': person_map[jc.author.name],
        }
    return bbc
def extract_issue_number(jira_issue_key):
    return int(jira_issue_key.split('-')[-1])

def jira_key_to_bb_issue_tag(jira_issue_key):
    return 'issue #' + str(extract_issue_number(jira_issue_key))

def jira_link_text(jk):
    return link("https://invitae.jira.com/browse/"+jk,jk) + " (Invitae access required)"
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    opts = parse_args(sys.argv[1:])
    dir_name = opts.jira_project
    if opts.jira_components:
        dir_name += '-' + ','.join(opts.jira_components)

    if opts.jira_issues_file:
        issues = [i.strip() for i in open(opts.jira_issues_file,'r')]
        logger.info("added {n} issues from {opts.jira_issues_file} to issues list".format(n=len(issues),opts=opts))
        opts.jira_issues += issues

    opts.dir = os.path.join('/','tmp',dir_name)
    opts.att_rel_dir = 'attachments'
    opts.att_abs_dir = os.path.join(opts.dir,opts.att_rel_dir)
    opts.json_fn = os.path.join(opts.dir,'db-1.0.json')
    if not os.path.isdir(opts.att_abs_dir):
        os.makedirs(opts.att_abs_dir)

    opts.jira_issues = list(set(opts.jira_issues))  # distinctify

    jcl = JIRA({'server': 'https://{opts.jira_hostname}/'.format(opts=opts)},
               basic_auth=(opts.jira_username,opts.jira_password))

    if opts.existing:
        issues_db = json.load(open(opts.json_fn,'r'))
        existing_ids = [ i['id'] for i in issues_db['issues'] ]
        logger.info("read {n} issues from {fn}".format(n=len(existing_ids),fn=opts.json_fn))
    else:
        issues_db = dict()
        issues_db['meta'] = {
            'default_milestone': None,
            'default_assignee': None,
            'default_kind': "bug",
            'default_component': None,
            'default_version': None,
            }
        issues_db['attachments'] = []
        issues_db['comments'] = []
        issues_db['issues'] = []
        issues_db['logs'] = []

    issues_db['components'] = [ {'name':v.name} for v in jcl.project_components(opts.jira_project) ]
    issues_db['milestones'] = [ {'name':v.name} for v in jcl.project_versions(opts.jira_project) ]
    issues_db['versions'] = issues_db['milestones']

    # bb_issue_map: bb issue # -> bitbucket issue
    bb_issue_map = ImmutableDict( (i['id'],i) for i in issues_db['issues'] )

    # jk_issue_map: jira key -> bitbucket issue
    # contains only items migrated from JIRA (i.e., not preexisting issues with --existing)
    jk_issue_map = ImmutableDict()

    # issue_links is a dict of dicts of lists, using JIRA keys
    # e.g., links['CORE-135']['depends on'] = ['CORE-137']
    issue_links = collections.defaultdict(lambda: collections.defaultdict(lambda: []))

    issues = fetch_issues(opts,jcl)
    logger.info("fetch {n} issues from JIRA".format(n=len(issues)))

    for ji in issues:
        # Pfft. Need to fetch the issue again due to bug in JIRA.
        # See https://bitbucket.org/bspeakmon/jira-python/issue/47/, comment on 2013-10-01 by ssonic
        ji = jcl.issue(ji.key,expand="attachments,comments")

        # create the issue
        bbi = jira_issue_to_bb_issue(opts,jcl,ji)
        issues_db['issues'] += [bbi]
        bb_issue_map[bbi['id']] = bbi
        jk_issue_map[ji.key] = bbi
        issue_links[ji.key]['imported from'] = [jira_link_text(ji.key)]

        # add comments
        for jc in ji.fields.comment.comments:
            bbc = jira_comment_to_bb_comment(opts,jcl,jc)
            bbc['issue'] = bbi['id']
            issues_db['comments'] += [bbc]

        # add attachments
        for ja in ji.fields.attachment:
            att_rel_path = os.path.join(opts.att_rel_dir,ja.id)
            att_abs_path = os.path.join(opts.att_abs_dir,ja.id)
            if not os.path.exists(att_abs_path):
                open(att_abs_path,'w').write(ja.get())
                logger.info("Wrote {att_abs_path}".format(att_abs_path=att_abs_path))
            bba = {
                "path": att_rel_path,
                "issue": bbi['id'],
                "user": person_map[ja.author.name],
                "filename": ja.filename,
                }
            issues_db['attachments'] += [bba]

        # parent-child is task-subtask
        if hasattr(ji.fields,'parent'):
            issue_links[ji.fields.parent.key]['subtasks'].append(jira_key_to_bb_issue_tag(ji.key))
            issue_links[ji.key]['parent task'].append(jira_key_to_bb_issue_tag(ji.fields.parent.key))

        # add links
        for il in ji.fields.issuelinks:
            if hasattr(il,'outwardIssue'):
                issue_links[ji.key][il.type.outward].append(jira_key_to_bb_issue_tag(il.outwardIssue.key))
            elif hasattr(il,'inwardIssue'):
                issue_links[ji.key][il.type.inward].append(jira_key_to_bb_issue_tag(il.inwardIssue.key))

        logger.info("migrated issue {ji.key}: {ji.fields.summary} ({components})".format(
            ji=ji,components=','.join(c.name for c in ji.fields.components)))

    # append links section to content
    # this section shows both task-subtask and "issue link" relationships
    for src,dstlinks in issue_links.iteritems():
        if src not in jk_issue_map:
            logger.warn("issue {src}, with issue_links, not in jk_issue_map; skipping".format(src=src))
            continue

        links_block = "Links\n=====\n"
        for desc,dsts in sorted(dstlinks.iteritems()):
            links_block += "* **{desc}**: {links} \n".format(desc=desc,links=", ".join(dsts))

        if jk_issue_map[src]['content']:
            jk_issue_map[src]['content'] += "\n\n" + links_block
        else:
            jk_issue_map[src]['content'] = links_block

    id_counts = collections.Counter(i['id'] for i in issues_db['issues'])
    dupes = [ k for k,cnt in id_counts.iteritems() if cnt>1 ]
    if dupes:
        raise RuntimeError("{n} issue ids appear more than once from existing {opts.json_fn}".format(
            n=len(dupes),opts=opts))

    json.dump(issues_db,open(opts.json_fn,'w'))
    logger.info("wrote {n} issues to {opts.json_fn}".format(n=len(id_counts),opts=opts))

    # write zipfile
    os.chdir(opts.dir)
    with zipfile.ZipFile(opts.dir + '.zip','w') as zf:
        for fn in ['db-1.0.json']+glob.glob('attachments/*'):
            zf.write(fn)
            logger.info("added {fn} to archive".format(fn=fn))
NOTE: I'm writing a new answer because writing this in a comment would be horrible, but most of the credit goes to @Turch's answer.
My steps (in OSX and Debian machines, both worked fine):
apt-get install python-pip (Debian) or sudo easy_install pip (OSX)
pip install jira
pip install configparser
easy_install -U setuptools (not sure if really needed)
Download or clone the source code from https://bitbucket.org/reece/rcore/ in your home folder, for example. Note: don't download using pip, it will get the 0.0.2 version and you need the 0.0.3.
Download the Python script created by Reece, mentioned by @Turch, and place it inside of the rcore folder.
Follow the instructions by @Turch: I also had to replace all instances of iteritems with items in the script and in rcore/types/immutabledict.py to make it work with Python 3. You will also need to fill in the dictionaries (priority_map, person_map, etc) with the values your project uses. Finally, you need a config file to exist with the connection info (see comments at the top of the script). Note: I used hostname like jira.domain.com (no http or https).
(This change did the trick for me.) I had to change part of line 250 from 'https://{opts.jira_hostname}/' to 'http://{opts.jira_hostname}/'
To finish, run the script like @Turch mentioned: The basic command line usage is export.py --jira-project <project>
The file was placed in /tmp/.zip for me.
The file was perfectly accepted in the BitBucket importer today.
Hooray for Reece and Turch! Thanks guys!

McAfee Update download script

I'm setting up PCs with McAfee installed on them and have been told I need to stop the program going online to download updates (DAT files). I need to create a script to download the DAT file from the McAfee web site and put it on a server where McAfee can access and install it.
Has anyone done this in the past?
I actually have done this. I haven't tested this script in a year or two, but here is what I was using. It isn't written in PowerShell, but if you change the directories I think it can run on Windows (see the note after the script).
#!/usr/bin/python

import ftplib
import tarfile
import shutil
import os
import re
import time

scannerDir = "/usr/local/uvscan/"
tmp = "/tmp/avscanner/"

def downloadDat():
    datFile = ""
    r = re.compile("^avvdat")
    ftp = ftplib.FTP("ftp.nai.com", "anonymous", "email@yourdomain.com")
    ftp.cwd("/pub/datfiles/english")
    list = ftp.nlst()
    for x in list:
        if r.search(x):
            datFile = x
    f = open(tmp + "datfile", 'wb')
    ftp.retrbinary("RETR " + datFile, f.write)
    f.close()
    ftp.quit()

def unpackDat():
    tFile = tarfile.open(tmp + "datfile", 'r')
    for f in tFile.getnames():
        tFile.extract(f, tmp)

def createDirs():
    if os.path.isdir(tmp) == False:
        os.mkdir(tmp, 0700)
        os.chown(tmp, 0, 95)
        os.chmod(tmp, 0755)

def doCleanup():
    shutil.rmtree(tmp)

def installFiles():
    shutil.copyfile(tmp + "/avvclean.dat", scannerDir + "/avvclean.dat")
    shutil.copyfile(tmp + "/avvnames.dat", scannerDir + "/avvnames.dat")
    shutil.copyfile(tmp + "/avvscan.dat", scannerDir + "/avvscan.dat")

def isOld():
    if os.path.isfile(scannerDir + "/avvclean.dat"):
        if time.time() - os.path.getctime(scannerDir + "/avvclean.dat") < 80000:
            return True
        else:
            return False
    else:
        return True

def main():
    if isOld():
        createDirs()
        downloadDat()
        unpackDat()
        installFiles()
        doCleanup()

if __name__ == "__main__":
    main()
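For what it's worth, the Windows adaptation hinted at above would mostly mean swapping the two directory constants and dropping the Unix-only os.chown/os.chmod calls in createDirs(); the paths below are placeholders only.

# Hypothetical Windows settings - adjust to wherever your scanner and temp space live.
# Forward slashes are accepted by Python's file functions on Windows.
scannerDir = "C:/Program Files/McAfee/uvscan/"
tmp = "C:/Temp/avscanner/"

def createDirs():
    # No chown/chmod on Windows; just make sure the temp folder exists.
    if not os.path.isdir(tmp):
        os.makedirs(tmp)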