How can an instance of a class from a module, created inside another class, see that class's variables (Python 3)?

I want to build a module (anim.py) containing a class AnimationCanvas, and to use this module from a separate main.py file in which the data (a variable) changes (potentially driven by a GUI). The instance animatedAxes should automatically update its plot by reading the variables in main.py while the main.py code is running (a sort of parallel process).
Problem: the instance of class AnimationCanvas from the module does not see the variables of the class main in the main.py file.
I know how to do this when the class AnimationCanvas and the class main are in the same file. However, I want a reusable animation module (a separate file) that can be used anywhere by importing it and writing a couple of lines.
I can pass the variables into the __init__ function of the class AnimationCanvas, but that is a one-time effect: if the variables later change in class main, the animatedAxes instance will not see the change.
Single file which works (single.py):
import matplotlib.pyplot as plt
from matplotlib.animation import TimedAnimation
from matplotlib.lines import Line2D
import numpy as np

class main():
    def __init__(self):
        self.size = 800
        main.data = np.random.rand(self.size)
        # initialize animated graph routine
        self.animatedAxes = AnimationCanvas()
        # run random data array
        for ii in range(20):
            main.data = np.random.rand(self.size)
            plt.pause(0.1)

class AnimationCanvas(TimedAnimation):
    def __init__(self):
        # initialize random data array
        self.data = np.random.rand(5)
        # create animation axes and figure
        self.fig = plt.figure(1, figsize=(5, 5))
        self.ax = plt.axes([0.1, 0.1, 0.8, 0.8])
        self.line1 = Line2D([], [], color='blue')
        self.ax.add_line(self.line1)
        # start animation with an interval of 10 milliseconds
        TimedAnimation.__init__(self, self.fig, interval=10, blit=True)

    def new_frame_seq(self):
        return iter(range(5 * 5))

    def _step(self, *args):
        try:
            TimedAnimation._step(self, *args)
        except Exception:
            TimedAnimation._stop(self)

    def _draw_frame(self, framedata):
        # update self.data
        self.data = main.data
        # update plot with self.data
        self.line1.set_data(np.arange(len(self.data)), self.data)

if __name__ == '__main__':
    main()
Two files which do not work:
main.py:
import matplotlib.pyplot as plt
import numpy as np
from anim import AnimationCanvas

class main():
    def __init__(self):
        self.size = 800
        self.data = np.random.rand(self.size)
        # initialize animated graph routine
        self.animatedAxes = AnimationCanvas()
        # run random data array
        for ii in range(20):
            print(ii)
            self.data = np.random.rand(self.size)
            plt.pause(0.1)

if __name__ == '__main__':
    main()
anim.py:
import matplotlib.pyplot as plt
from matplotlib.animation import TimedAnimation
from matplotlib.lines import Line2D
import numpy as np

class AnimationCanvas(TimedAnimation):
    def __init__(self):
        # initialize random data array
        self.data = np.random.rand(5)
        # create animation axes and figure
        self.fig = plt.figure(1, figsize=(5, 5))
        self.ax = plt.axes([0.1, 0.1, 0.8, 0.8])
        self.line1 = Line2D([], [], color='blue')
        self.ax.add_line(self.line1)
        # start animation with an interval of 10 milliseconds
        TimedAnimation.__init__(self, self.fig, interval=10, blit=True)

    def new_frame_seq(self):
        return iter(range(5 * 5))

    def _step(self, *args):
        try:
            TimedAnimation._step(self, *args)
        except Exception:
            TimedAnimation._stop(self)

    def _draw_frame(self, framedata):
        # update self.data:
        # ????????????????
        # update plot with self.data
        self.line1.set_data(np.arange(len(self.data)), self.data)
I tried to use super(AnimationCanvas, self).__init__(), but it does not work.
In my understanding I need a direct connection between the self of the class main and the self of the class AnimationCanvas. Any suggestions are appreciated. Thanks.
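One way to get such a live connection without merging the files is to hand AnimationCanvas a reference it can call on every frame, instead of a one-time copy of the data. Below is a minimal sketch, assuming anim.py is changed to accept a zero-argument callable; the data_source parameter is my own name, not part of the original code:

# anim.py (sketch): pull fresh data through a callable on each frame
import matplotlib.pyplot as plt
from matplotlib.animation import TimedAnimation
from matplotlib.lines import Line2D
import numpy as np

class AnimationCanvas(TimedAnimation):
    def __init__(self, data_source):
        # data_source: any zero-argument callable returning the current array
        self.data_source = data_source
        self.fig = plt.figure(1, figsize=(5, 5))
        self.ax = plt.axes([0.1, 0.1, 0.8, 0.8])
        self.line1 = Line2D([], [], color='blue')
        self.ax.add_line(self.line1)
        TimedAnimation.__init__(self, self.fig, interval=10, blit=True)

    def new_frame_seq(self):
        return iter(range(5 * 5))

    def _draw_frame(self, framedata):
        data = self.data_source()  # reads the caller's current data every frame
        self.line1.set_data(np.arange(len(data)), data)

main.py would then create the canvas with self.animatedAxes = AnimationCanvas(lambda: self.data); because the lambda closes over self, each frame sees the current value of self.data rather than a stale copy.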

Related

pytest - mockup a complex module import

I have found several posts on how to "hide" a package and simulate an ImportError with pytest; however, I haven't succeeded in my case and I am looking for some help:
Test for import of optional dependencies in __init__.py with pytest: Python 3.5 /3.6 differs in behaviour
Test behavior of code if optional module is not installed
and related
Here is the content of an __about__.py file that I want to test with pytest.
"""Get the metadata from the package or from setup.py."""
try:
import importlib
metadata = importlib.metadata
except ImportError:
import importlib_metadata as metadata
try:
data = metadata.metadata("mypackage")
__version__ = data["Version"]
__author__ = data["Author"]
__name__ = data["Name"]
except metadata.PackageNotFoundError:
# The repo of the package is accessible to python to get at least the version
import re
from pathlib import Path
try:
from nested_grid_plotter import __file__ as loc
with open(Path(loc).parent.joinpath("../setup.py"), "r") as f:
data = f.read()
except FileNotFoundError:
data = ""
def version_parser(v):
"""Parse the version from the setup file."""
version_pattern = (
r"""(version\s*=\s*)["|'](\d+(=?\.(\d+(=?\.(\d+)*)*)*)*)["|']"""
)
regex_matcher = re.compile(version_pattern).search(v)
if regex_matcher is None:
return "unknwon"
return regex_matcher.group(2)
try:
__version__ = version_parser(data)
except Exception:
__version__ = "unknown"
__author__ = "unknown"
__name__ = "unknown"
Here is the __init__.py at the root of the package:
from .__about__ import __version__, __name__, __author__
And here are the tests that I have come up with so far. However, I am not able to hide importlib.
"""Test the file __about__.py."""
import pytest
import sys
class PackageDiscarder:
def __init__(self):
self.pkgnames = []
def find_spec(self, fullname, path, target=None):
if fullname in self.pkgnames:
raise ImportError()
#pytest.fixture
def no_requests():
sys.modules.pop("importlib", None)
d = PackageDiscarder()
d.pkgnames.append("importlib")
sys.meta_path.insert(0, d)
yield
sys.meta_path.remove(d)
#pytest.fixture(autouse=True)
def cleanup_imports():
yield
sys.modules.pop("mypackage", None)
def test_requests_available():
import mypackage
assert mypackage.__version__ != "unknwon"
#pytest.mark.usefixtures("no_requests")
def test_requests_missing():
import mypackage
assert mypackage.__version__ != "unknwon"
Here is the coverage report:
Name                     Stmts   Miss  Cover   Missing
------------------------------------------------------
mypackage/__about__.py      31     10    68%   5-6, 10-12, 23-24, 33, 38-39
------------------------------------------------------
TOTAL                       31     10    68%
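For what it is worth, importlib itself is part of the interpreter's import machinery and is already imported when the tests start, which makes it hard to hide with a meta_path finder. A more reliable target is the importlib.metadata submodule: setting a sys.modules entry to None makes a subsequent import of that name raise ModuleNotFoundError (a subclass of ImportError). A minimal sketch, assuming the try block in __about__.py is rewritten as import importlib.metadata and that the importlib_metadata backport is installed; the fixture name and that rewrite are my own, not from the question:

import sys
import pytest

@pytest.fixture
def no_importlib_metadata(monkeypatch):
    # A None entry in sys.modules makes "import importlib.metadata"
    # raise ModuleNotFoundError, a subclass of ImportError.
    monkeypatch.setitem(sys.modules, "importlib.metadata", None)
    # Drop the cached package so the import in __about__.py actually re-runs.
    monkeypatch.delitem(sys.modules, "mypackage", raising=False)

@pytest.mark.usefixtures("no_importlib_metadata")
def test_fallback_to_backport():
    import mypackage  # should now take the importlib_metadata branch
    assert mypackage.__version__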

locust 0.9 to 1.3 Exception: No tasks defined. Use the @task decorator or set the tasks property of the User

I have the following code, which ran fine in locust 0.9. Now with 1.3 it throws the exception mentioned in the title. Can anyone see what's wrong?
import time
import random
import datetime
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import logging
import json
import os
from random import randint, choice
from locust import HttpUser, TaskSet, task
from pyquery import PyQuery

requests.packages.urllib3.disable_warnings()

class FrontPage(TaskSet):
    def on_start(self):
        self.client.verify = False

    @task(20)
    def index(self):
        self.client.get("/")

class DestinationPagesFixed(TaskSet):
    de_paths = ["/belgien", "daenemark", "deutschland", "frankreich", "griechenland"
                , "italien"
                , "luxemburg"
                ]

    def on_start(self):
        self.client.verify = False

    @task
    def test_1(self):
        paths = self.de_paths
        path = choice(paths)
        self.client.get(path, name="Static page")

class UserBehavior(TaskSet):
    tasks = {FrontPage: 15, DestinationPagesFixed: 19}

class WebsiteUser(HttpUser):
    task_set = UserBehavior
    min_wait = 400
    max_wait = 10000
Change
task_set = UserBehavior
to
tasks = [UserBehavior]
Or (skipping your UserBehavior class entirely)
tasks = {FrontPage: 15, DestinationPagesFixed: 19}
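Note also that locust 1.x replaced the millisecond-based min_wait/max_wait with wait_time (in seconds). A minimal sketch of the updated user class under that assumption:

from locust import HttpUser, between

class WebsiteUser(HttpUser):
    tasks = {FrontPage: 15, DestinationPagesFixed: 19}
    # wait_time = between(min_seconds, max_seconds) replaces min_wait/max_wait
    wait_time = between(0.4, 10)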

import class with pandas from file

I have two files:
main.py
from personal_function.Convert import cls_Convert
df_key = 'PERFIL'
a = cls_Convert(df_data_2)
a.convert_cat_to_num(df_key)
df_data_2 = a.df
personal_function/Convert.py
import pandas as pd

class cls_Convert:
    def __init__(self, df):
        self.df = df

    # Change a categorical variable to numeric
    def convert_cat_to_num(self, df_key):
        self.df[df_key] = pd.factorize(self.df[df_key], sort=True)[0] + 1
        return self.df

    # Change a numeric variable to categorical
    def convert_num_to_cat(self, df_key, cat_bin, cat_label):
        self.df[df_key].replace(to_replace=cat_bin, value=cat_label, inplace=True)
        return self.df
However, I get this error:
ImportError: cannot import name 'cls_Convert' from 'personal_function.Convert'
For a class or function to be visible outside of a package, it must be imported in the package's __init__.py file, which is run when the package is imported from somewhere. All the variables, imports, methods, and classes defined in that __init__.py are then made visible to whatever imports the package. Take the example below:
example/example.py
def visible():
pass
def not_visible():
pass
example/__init__.py
from .example import visible
main.py
from example import visible
from example import not_visible # results in an error since it was not imported
# in the `example` package's `__init__.py` file.
To make your Convert class visible to the external main.py file, create the __init__.py for the package.
You can read more about Python packages and submodules here.
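Applied to the layout in the question, a minimal __init__.py for the package would be:

# personal_function/__init__.py
from .Convert import cls_Convert

With that in place, main.py could also import the class directly from the package, e.g. from personal_function import cls_Convert.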
I needed to add the current working directory to sys.path rather than use the whole absolute path:

from os import getcwd
from sys import path

cwd = getcwd()
path.append(cwd)

Error in running Apache Beam Python SplittableDoFn

I encountered this error while trying Pub/Sub IO with a splittable DoFn:
RuntimeError: Transform node
AppliedPTransform(ParDo(TestDoFn)/ProcessKeyedElements/GroupByKey/GroupByKey,
_GroupByKeyOnly) was not replaced as expected.
Can someone help me by reviewing the code for anything I might be doing incorrectly?
Code:
"""
python examples/test_restriction_unbounded.py --project mk2 --topic projects/mk2/topics/testing
"""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import logging
import sys
import time
from datetime import datetime
import apache_beam as beam
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.io.restriction_trackers import OffsetRestrictionTracker, OffsetRange
from apache_beam.transforms.core import RestrictionProvider
class TestProvider(RestrictionProvider):
def initial_restriction(self, element):
return OffsetRange(0, 1)
def create_tracker(self, restriction):
return OffsetRestrictionTracker(restriction)
def restriction_size(self, element, restriction):
return restriction.size()
class TestDoFn(beam.DoFn):
def process(
self,
element,
restriction_tracker=beam.DoFn.RestrictionParam(
TestProvider())):
import pdb; pdb.set_trace()
cur = restriction_tracker.current_restriction().start
while restriction_tracker.try_claim(cur):
return element
def run(argv=None, save_main_session=True):
parser = argparse.ArgumentParser()
parser.add_argument('--topic', type=str, help='Pub/Sub topic to read from')
args, pipeline_args = parser.parse_known_args(argv)
options = PipelineOptions(pipeline_args)
options.view_as(StandardOptions).streaming = True
with beam.Pipeline(options=options) as p:
# data = ['abc', 'defghijklmno', 'pqrstuv', 'wxyz']
# actual = (p | beam.Create(data) | beam.ParDo(ExpandingStringsDoFn()))
scores = p | beam.io.ReadFromPubSub(topic=args.topic) | beam.ParDo(TestDoFn())
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
You are ingesting data from Pub/Sub by streaming. Then you have to create batches by window before applying this kind of transform: (ParDo(TestDoFn)/ProcessKeyedElements/GroupByKey/GroupByKey, _GroupByKeyOnly)
Pub/Sub with window example: https://cloud.google.com/pubsub/docs/pubsub-dataflow
Try to do it like this:
from apache_beam import window  # needed for window.FixedWindows below

class GroupWindowsIntoBatches(beam.PTransform):
    """A composite transform that groups Pub/Sub messages."""

    def __init__(self, window_size):
        # Convert minutes into seconds.
        self.window_size = int(window_size * 60)

    def expand(self, pcoll):
        return (
            pcoll
            # Assigns window info to each Pub/Sub message based on its
            # publish timestamp.
            | "Window into Fixed Intervals"
            >> beam.WindowInto(window.FixedWindows(self.window_size))
        )

def run(argv=None, save_main_session=True):
    parser = argparse.ArgumentParser()
    parser.add_argument('--topic', type=str, help='Pub/Sub topic to read from')
    args, pipeline_args = parser.parse_known_args(argv)
    options = PipelineOptions(pipeline_args)
    options.view_as(StandardOptions).streaming = True
    window_size = 1.0
    with beam.Pipeline(options=options) as p:
        scores = (p
                  | beam.io.ReadFromPubSub(topic=args.topic)
                  | "WindowInto" >> GroupWindowsIntoBatches(window_size)
                  | beam.ParDo(TestDoFn())
                  )
I had the same error. Removing the streaming option solved the problem for me.

mocking snowflake connection

I have a SnowflakeApi class in Python which just works as a wrapper on top of the SnowflakeConnection class. My SnowflakeApi is:
import logging
import os
from snowflake.connector import connect

class SnowflakeApi(object):
    """
    Wrapper to handle snowflake connection
    """

    def __init__(self, account, warehouse, database, user, pwd):
        """
        Handles snowflake connection. Connection must be closed once it is no longer needed
        :param account:
        :param warehouse:
        :param database:
        """
        self.__acct = self._account_url(account)
        self.__wh = warehouse
        self.__db = database
        self.__connection = None
        self.__user = user
        self.__pwd = pwd

    def __create_connection(self):
        try:
            # set the proxy here
            conn = connect(
                account=self.__acct
                , user=self.__user
                , password=self.__pwd
                , warehouse=self.__wh
                , database=self.__db
            )
            return conn
        except:
            raise Exception(
                "Unable to connect to snowflake for user: '{0}', warehouse: '{1}', database: '{2}'".format(
                    self.__user, self.__wh, self.__db))

    def get_connection(self):
        """
        Gets a snowflake connection. If the connection has already been initialised it is returned
        otherwise a new connection is created
        :param credentials_func: method to get database credentials.
        :return:
        """
        try:
            if self.__connection is None:
                self.__connection = self.__create_connection()
            return self.__connection
        except:
            raise Exception("Unable to initialise Snowflake connection")

    def close_connection(self):
        """
        Closes snowflake connection.
        :return:
        """
        self.__connection.close()
The namespace for SnowflakeApi is connection.snowflake_connection.SnowflakeApi (i.e. I have snowflake_connection.py in a folder called connections).
I want to write unit tests for this class using pytest and unittest.mock. The problem is that I want to mock connect so that a MagicMock object is returned and no database call is made. So far I have tried:
monkeypatch.setattr(connections.snowflake_connection, "connect", return_value="")
I also changed my original class to just import snowflake, created a mock object, and used monkeypatch.setattr(snowflake_connection, "snowflake", my_mock_snowflake). That didn't work either.
In short, I have tried a couple of other things but nothing has worked. All I want to do is mock the snowflake connection so that no actual database call is made.
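For reference, one way that stays close to the monkeypatch attempt above is to patch the connect name where SnowflakeApi looks it up, i.e. in the wrapper's own module. A minimal sketch, assuming the module path from the question; the constructor arguments are placeholders, and the _account_url helper (referenced but not shown in the question) is assumed to exist:

from unittest.mock import MagicMock

import connection.snowflake_connection as snowflake_connection

def test_get_connection_returns_cached_mock(monkeypatch):
    # Replace the 'connect' imported into the wrapper's module,
    # not snowflake.connector.connect itself.
    monkeypatch.setattr(snowflake_connection, "connect",
                        lambda **kwargs: MagicMock())
    api = snowflake_connection.SnowflakeApi("acct", "wh", "db", "user", "pwd")
    conn = api.get_connection()
    assert api.get_connection() is conn  # the second call reuses the connection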
Here is another way, where we mock the snowflake connector, cursor, and fetchall using Python mock and patch.
import mock
import unittest
from datetime import datetime, timedelta
import feed_daily_report

class TestFeedDailyReport(unittest.TestCase):
    @mock.patch('snowflake.connector.connect')
    def test_compare_partner(self, mock_snowflake_connector):
        tod = datetime.now()
        delta = timedelta(days=8)
        date_8_days_ago = tod - delta
        query_result = [('partner_1', date_8_days_ago)]
        mock_con = mock_snowflake_connector.return_value
        mock_cur = mock_con.cursor.return_value
        mock_cur.fetchall.return_value = query_result
        result = feed_daily_report.main()
        assert result == True
An example using unittest.mock and patching the connection:
from unittest import TestCase
from unittest.mock import patch
from connection.snowflake_connection import SnowflakeApi

class TestSnowFlakeApi(TestCase):
    @patch('connection.snowflake_connection.connect')
    def test_get_connection(self, mock_connect):
        api = SnowflakeApi('the_account',
                           'the_warehouse',
                           'the_database',
                           'the_user',
                           'the_pwd')
        api.get_connection()
        mock_connect.assert_called_once_with(account='account_url',  # Will be the output of self._account_url()
                                             user='the_user',
                                             password='the_pwd',
                                             warehouse='the_warehouse',
                                             database='the_database')
If you're testing other classes that use your SnowFlakeApi wrapper, then you should use the same approach, but patch the SnowFlakeApi itself in those tests.
from package.module import SomeClassThatUsesSnowFlakeApi

class TestSomeClassThatUsesSnowFlakeApi(TestCase):
    @patch('package.module.SnowFlakeApi')
    def test_some_func(self, mock_api):
        instance = SomeClassThatUsesSnowFlakeApi()
        instance.do_something()
        mock_api.assert_called_once_with(...)
        mock_api.return_value.get_connection.assert_called_once_with()
Also note that if you're using Python 2, you will need to pip install mock and then from mock import patch.
Using stubbing and dependency injection
from ... import SnowflakeApi

def some_func(*args, api=None, **kwargs):
    api = api or SnowflakeApi(...)
    conn = api.get_connection()
    # Do some work
    return result
Your test:

from unittest.mock import MagicMock

class SnowflakeApiStub(SnowflakeApi):
    def __init__(self):
        # bypass the super constructor; assign the attribute under the name
        # it is mangled to inside SnowflakeApi, so that the inherited
        # get_connection() can find it
        self._SnowflakeApi__connection = MagicMock()

def test_some_func():
    stub = SnowflakeApiStub()
    mock_connection = stub._SnowflakeApi__connection
    mock_cursor = mock_connection.cursor.return_value
    expect = ...
    actual = some_func(api=stub)
    assert expect == actual
    assert mock_cursor.execute.called
An example using cursor, execute, and fetchone.
import snowflake.connector

class AlongSamePolly:
    def __init__(self, conn):
        self.conn = conn

    def row_count(self):
        cur = self.conn.cursor()
        query = cur.execute('select count(*) from schema.table;')
        return query.fetchone()[0]  # fetchone() returns (12345,)

# I like to dependency inject the snowflake connection object in my classes.
# This lets me use the Snowflake Python Connector's built-in context manager to
# roll back any errors and automatically close connections. Then you don't have
# try/except/finally blocks everywhere in your code.
if __name__ == '__main__':
    with snowflake.connector.connect(user='user', password='password') as con:
        same = AlongSamePolly(con)
        print(same.row_count())
        # => 12345
In the unit tests you mock out the expected method calls (cursor(), execute(), fetchone()) and define the return values to follow the chain of defined mocks.
import unittest
from unittest import mock
from along_same_polly import AlongSamePolly

class TestAlongSamePolly(unittest.TestCase):
    def test_row_count(self):
        with mock.patch('snowflake.connector.connect') as mock_snowflake_conn:
            mock_query = mock.Mock()
            mock_query.fetchone.return_value = (123,)
            mock_cur = mock.Mock()
            mock_cur.execute.return_value = mock_query
            mock_snowflake_conn.cursor.return_value = mock_cur
            same = AlongSamePolly(mock_snowflake_conn)
            self.assertEqual(same.row_count(), 123)

if __name__ == '__main__':
    unittest.main()
The following solution worked for me:
def test_connect(env_var_setup, monkeypatch):
    monkeypatch.setattr(snowflake.connector.connection.SnowflakeConnection,
                        "connect", mocked_sf_connect
                        )
    # calling snowflake connector method
    file_job_map(env_var_setup).connect()

# mocked connection
def mocked_sf_connect(self, **kwargs):
    print("Connection Successfully Established")
    return True