jsPDF not defined Ionic 3

I'm trying to use the jspdf-autotable module which I installed via
npm install jspdf jspdf-autotable
To use the module in my Ionic component I did the following:
declare let jsPDF;
I then proceed with some sample code taken from the jspdf-autotable repo in my component:
createReport() {
  let columns = ["ID", "Name", "Age", "City"];
  let data = [
    [1, "Jonathan", 25, "Gothenburg"],
    [2, "Simon", 23, "Gothenburg"],
    [3, "Hanna", 21, "Stockholm"]
  ];
  let doc = new jsPDF('p', 'pt');
  doc.autoTable(columns, data);
  doc.save("table.pdf");
}
Upon calling createReport(), however, I get the following error message: ReferenceError: jsPDF is not defined
How can I correctly import jspdf-autotable? Any help would be highly appreciated.

You need to import the plugin and declare jsPDF as a global variable in your component.
import * as jsPDF from 'jspdf'
declare var jsPDF: any;

@Component({
  selector: 'app-root',
  templateUrl: './app.component.html',
  styleUrls: ['./app.component.css']
})

This ended up working for me:
import * as jsPDF from 'jspdf'
import 'jspdf-autotable'
For some reason, it would only work when specifying rows and columns as follows:
var columns = [
{title: "ID", dataKey: "id"},
{title: "Name", dataKey: "name"},
{title: "Country", dataKey: "country"},
...
];
var rows = [
{"id": 1, "name": "Shaw", "country": "Tanzania", ...},
{"id": 2, "name": "Nelson", "country": "Kazakhstan", ...},
{"id": 3, "name": "Garcia", "country": "Madagascar", ...},
...
];

Related

plotly mapbox - create clusters in mapview

I am building a Dash app that uses the Plotly scattermapbox graph object. In the current map view each point is represented as a circle. As the user zooms in and out, I'd like to cluster the points and create groupings. Here's my code for reference.
import dash
from dash import dcc, html, Output, Input
import pandas as pd

app = dash.Dash(__name__)

df = pd.DataFrame({
    'x': [1, 2, 3],
    'Lat': [37.774322, 37.777035, 37.773033],
    'Long': [-122.489761, -122.485555, -122.491220]
})

layout = html.Div([
    dcc.Graph(id="map"),
    dcc.Input(id="inp")
])

@app.callback(
    Output('map', 'figure'),
    Input('inp', 'value')
)
def fin(val):
    # do something
    data = []
    data.append({
        "type": "scattermapbox",
        "lat": df["Lat"],
        "lon": df["Long"],
        "name": "Location",
        "showlegend": False,
        "hoverinfo": "text",
        "mode": "markers",
        "clickmode": "event+select",
        "customdata": df.loc[:, cd_cols].values,
        "marker": {
            "symbol": "circle",
            "size": 8,
            "opacity": 0.7,
            "color": "black"
        }
    })
    layout = {
        "autosize": True,
        "hovermode": "closest",
        "mapbox": {
            "accesstoken": MAPBOX_KEY,
            "bearing": 0,
            "center": {
                "lat": xxx,
                "lon": xxx
            },
            "pitch": 0,
            "zoom": zoom,
            "style": "satellite-streets",
        },
    }
    return ({'data': data, 'layout': layout})
Try using plotly.graph_objects.scattermapbox.Cluster. Hope this helps:
from dash import dcc, html, Dash, Output, Input
import pandas as pd
import plotly.graph_objects as go

app = Dash(__name__)

df = pd.DataFrame({
    'x': [1, 2, 3],
    'Lat': [37.774322, 37.777035, 37.773033],
    'Long': [-122.489761, -122.485555, -122.491220]
})

@app.callback(
    Output('map', 'figure'),
    Input('inp', 'value')
)
def fin(val):
    data = []
    data.append({
        "type": "scattermapbox",
        "lat": df["Lat"],
        "lon": df["Long"],
        "name": "Location",
        "showlegend": False,
        "hoverinfo": "text",
        "mode": "markers",
        "clickmode": "event+select",
        "customdata": df.loc[:, ['Lat', 'Long']].values,
        "marker": {
            "symbol": "circle",
            "size": 8,
            "opacity": 0.7,
            "color": "black"
        },
        "cluster": {'maxzoom': 14}
    })
    layout = {
        "autosize": True,
        "hovermode": "closest",
        "mapbox": {
            "bearing": 0,
            "center": {
                "lat": 37.774322,
                "lon": -122.489761
            },
            "pitch": 0,
            "zoom": 7,
            "style": "open-street-map",
        },
    }
    return ({'data': data, 'layout': layout})

app.layout = html.Div(
    [dcc.Graph(id="map"),
     dcc.Input(id="inp")]
)

if __name__ == '__main__':
    app.run_server(debug=True)
Notice the cluster parameter I added to data.
P.S. Make sure you are using a recent version of Dash for this to work. I used the latest version at the time, dash 2.7.1.
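For completeness, the same clustering can also be expressed through the graph-objects API instead of a raw trace dict. The following is only a sketch of that variant, reusing the df from above; the figure construction and the enabled/maxzoom values are my own choices, not part of the answer itself:

import plotly.graph_objects as go

# Sketch: build the clustered trace with go.Scattermapbox (the cluster property needs plotly >= 5.11)
fig = go.Figure(go.Scattermapbox(
    lat=df["Lat"],
    lon=df["Long"],
    mode="markers",
    marker={"size": 8, "color": "black", "opacity": 0.7},
    cluster={"enabled": True, "maxzoom": 14},  # merge nearby points into clusters until zoom level 14
))
fig.update_layout(
    mapbox_style="open-street-map",
    mapbox_center={"lat": 37.774322, "lon": -122.489761},
    mapbox_zoom=7,
)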

AWS Glue pySpark Filter & Manual Mapping of Several Columns

I'm using AWS Glue Studio with DynamicFrameCollections. I created a custom transformation where I am looking to filter by multiple columns and modify 2 columns in each row based on a static mapping list. I'm struggling to figure out the most efficient way to do this: pandas, UDFs, or something completely different?
Consider the sample dataframe:
data = [{"Category": 'A', "Subcategory": 2, "Value": 121.44, "Properties": {}},
{"Category": 'B', "Subcategory": 2, "Value": 300.01, "Properties": None},
{"Category": 'C', "Subcategory": 3, "Value": 10.99, "Properties": { "Active":True } },
{"Category": 'E', "Subcategory": 4, "Value": 33.87, "Properties": { "Active":True, "ReadOnly": False }},
{"Category": 'E', "Subcategory": 1, "Value": 11.37, "Properties": { "Active":True }}
]
df = spark.createDataFrame(data)
I need to filter and transform by Category and Subcategory. Below is the sample mapping: the key is the category and subcategory concatenated, the first value in the array must become a new column ActivityName, and the second value must be merged into the Properties column:
mapping= {"A2": ["EatingFood", { "Visible": True }],
"A3": ["DrinkingWater", { "Visible": False }],
"B2": ["Sleep", { "Visible": False }],
"C3": ["Exercise", { "Visible": False }],
"E4": ["Running", { "Visible": False }],
}
The output data I am expecting is:
resultingData = [{"Category": 'A', "Subcategory": 2, "ActivityName":"EatingFood", "Value": 121.44, "Properties": { "Visible": True }},
{"Category": 'B', "Subcategory": 2, "ActivityName":"Sleep", "Value": 300.01, "Properties": {"Visible": False}},
{"Category": 'C', "Subcategory": 3, "ActivityName":"Exercise", "Value": 10.99, "Properties": { "Active":True, "Visible": False } },
{"Category": 'E', "Subcategory": 4, "ActivityName":"Running", "Value": 33.87, "Properties": { "Active":True, "ReadOnly": False, "Visible": False }}
]
Note that the last data entry, E1, is missing because it was not in my mapping filter.
Is there any way to achieve this? I have a large list of items that I need to manually filter/map/transform like this. Thank you.
I got this working by transforming the dynamicframe into a dataframe and processing it using glue functions. Here's what I did:
def FilterAndMap(glueContext, dfc) -> DynamicFrameCollection:
    from pyspark.sql.types import StringType, ArrayType
    from awsglue.dynamicframe import DynamicFrame
    import pyspark.sql.functions as f
    import json

    # key = Category + Subcategory; value = [ActivityName, extra properties as a JSON string]
    mapping = {"A2": ["EatingFood", json.dumps({"Visible": True})],
               "A3": ["DrinkingWater", json.dumps({"Visible": False})],
               "B2": ["Sleep", json.dumps({"Visible": False})],
               "C3": ["Exercise", json.dumps({"Visible": False})],
               "E4": ["Running", json.dumps({"Visible": False})],
               }

    df = dfc.select(list(dfc.keys())[0]).toDF()

    def func_filter_udf(concat_str):
        # look up the [ActivityName, extra properties] entry for this key
        return mapping[concat_str]

    def func_map_udf(map_str):
        # merge the mapped extra properties with the existing Properties value, if any
        if map_str[1]:
            map_string = json.loads(map_str[0])
            ret_val = json.dumps({**map_string, **json.loads(map_str[1])})
        else:
            ret_val = map_str[0]
        return ret_val

    filter_udf = f.udf(func_filter_udf, ArrayType(StringType()))
    map_udf = f.udf(func_map_udf, StringType())

    # keep only rows whose Category+Subcategory key exists in the mapping
    df = df.filter(f.concat("Category", "Subcategory").isin([*mapping]))
    df = df.withColumn("concat_col", filter_udf(f.concat("Category", "Subcategory")))
    df = (df.withColumn("ActivityName", df.concat_col[0]).
          withColumn("Properties", map_udf(f.struct(df.concat_col[1], df.Properties))))
    df = df.drop("concat_col")

    dyf_processed = DynamicFrame.fromDF(df, glueContext, "filtered")
    return (DynamicFrameCollection({"filtered": dyf_processed}, glueContext))
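As a side note, not part of the answer above: since the mapping is small and static, the same filter/transform can also be sketched without UDFs by turning the mapping into a little DataFrame and broadcast-joining it. This assumes Properties is a map<string,boolean> column, that the keys being merged in (e.g. Visible) are not already present in Properties, and that a SparkSession is available (in a Glue custom transform it can be obtained from glueContext.spark_session):

import pyspark.sql.functions as f
from pyspark.sql.types import StructType, StructField, StringType, MapType, BooleanType

# Static mapping reshaped into rows of (key, ActivityName, extra properties).
mapping = {"A2": ["EatingFood", {"Visible": True}],
           "A3": ["DrinkingWater", {"Visible": False}],
           "B2": ["Sleep", {"Visible": False}],
           "C3": ["Exercise", {"Visible": False}],
           "E4": ["Running", {"Visible": False}]}
mapping_schema = StructType([
    StructField("map_key", StringType()),
    StructField("ActivityName", StringType()),
    StructField("ExtraProps", MapType(StringType(), BooleanType())),
])
mapping_df = spark.createDataFrame(
    [(k, v[0], v[1]) for k, v in mapping.items()], mapping_schema)

result = (
    df.withColumn("map_key",
                  f.concat(f.col("Category"), f.col("Subcategory").cast("string")))
      # The inner join both filters out unmapped rows (e.g. E1) and adds ActivityName/ExtraProps.
      .join(f.broadcast(mapping_df), "map_key", "inner")
      .withColumn("Properties",
                  f.when(f.col("Properties").isNull(), f.col("ExtraProps"))
                   .otherwise(f.map_concat("Properties", "ExtraProps")))
      .drop("map_key", "ExtraProps")
)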

Flutter orderby json data

I have data like:
[
{name: Imamat, short_name: Lev, chapters: 27, name_en: Leviticus, type: old, order: 3},
{name: Kejadian, short_name: Gen, chapters: 50, name_en: Genesis, type: old, order: 1},
//....
]
I need to return this data sorted by the order value (1, 2, 3, 4, ...). Here is my function that returns the data:
readDataBase() async {
  String data = await DefaultAssetBundle.of(context).loadString("assets/db/tb.json");
  final jsonResult = jsonDecode(data);
  return jsonResult['book'];
}
This should do the trick efficiently:
List items = [
{"name": "Imamat", "short_name": "Lev", 'chapters': 27, "name_en": "Leviticus", "type": "old", "order": 3},
{"name": "Kejadian", "short_name": "Gen", "chapters": 50, "name_en": "Genesis", "type": "old", "order": 1},
];
getOrderNo(e) => e["order"];
items.sort((a, b) => getOrderNo(a).compareTo(getOrderNo(b)));
print(items);

Pyspark with AWS Glue join 1-N relation into a JSON array

I don't know how to join 1-N relations on AWS Glue and export a JSON file like:
{"id": 123, "name": "John Doe", "profiles": [ {"id": 1111, "channel": "twitter"}, {"id": 2222, "channel": "twitter"}, {"id": 3333, "channel": "instagram"} ]}
{"id": 345, "name": "Test", "profiles": []}
The profiles JSON array should be created from the other tables, and I would also like to include the channel column.
The 3 tables that I have on AWS Glue data catalog are:
person_json
{"id": 123,"nanme": "John Doe"}
{"id": 345,"nanme": "Test"}
instagram_json
{"id": 3333, "person_id": 123}
{"id": 3333, "person_id": null}
twitter_json
{"id": 1111, "person_id": 123}
{"id": 2222, "person_id": 123}
This is the script I have so far:
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from pyspark.sql.functions import lit
from awsglue.context import GlueContext
from awsglue.job import Job
glueContext = GlueContext(SparkContext.getOrCreate())
# catalog: database and table names
db_name = "test_database"
tbl_person = "person_json"
tbl_instagram = "instagram_json"
tbl_twitter = "twitter_json"
# Create dynamic frames from the source tables
person = glueContext.create_dynamic_frame.from_catalog(database=db_name, table_name=tbl_person)
instagram = glueContext.create_dynamic_frame.from_catalog(database=db_name, table_name=tbl_instagram)
twitter = glueContext.create_dynamic_frame.from_catalog(database=db_name, table_name=tbl_twitter)
# Join the frames
joined_instagram = Join.apply(person, instagram, 'id', 'person_id').drop_fields(['person_id'])
joined_all = Join.apply(joined_instagram, twitter, 'id', 'person_id').drop_fields(['person_id'])
# Writing output to S3
output_s3_path = "s3://xxx/xxx/person.json"
output = joined_all.toDF().repartition(1)
output.write.mode("overwrite").json(output_s3_path)
How should the script be changed in order to achieve the desired output?
Thanks
from pyspark.sql.functions import collect_set, lit, struct
...
instagram = instagram.toDF().withColumn( 'channel', lit('instagram') )
instagram = instagram.withColumn( 'profile', struct('id', 'channel') )
twitter = twitter.toDF().withColumn( 'channel', lit('twitter') )
twitter = twitter.withColumn( 'profile', struct('id', 'channel') )
profiles = instagram.union(twitter)
profiles = profiles.groupBy('person_id').agg( collect_set('profile').alias('profiles') )
joined_all = person.join(profiles, person.id == profiles.person_id, 'left_outer').drop('channel', 'person_id')
joined_all.show(n=2, truncate=False)
+---+--------+-----------------------------------------------------+
|id |name |profiles |
+---+--------+-----------------------------------------------------+
|123|John Doe|[[1111, twitter], [2222, twitter], [3333, instagram]]|
|345|Test |null |
+---+--------+-----------------------------------------------------+
.show() doesn't show the full structure of the structs in the profiles field.
print(joined_all.collect())
[Row(id=123, name='John Doe', profiles=[Row(id=1111, channel='twitter'), Row(id=2222, channel='twitter'), Row(id=3333, channel='instagram')]), Row(id=345, name='Test', profiles=None)]
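One detail the snippet above leaves implicit: person is still a DynamicFrame in the original script, so it needs person = person.toDF() before the join. After that, writing the result the way the original script already does produces the nested JSON shape from the question. A short sketch (the S3 path is the placeholder from the question):

# person must be a plain DataFrame before the DataFrame join shown above
person = person.toDF()

# each struct in `profiles` becomes a nested JSON object when written with write.json
joined_all.repartition(1).write.mode("overwrite").json("s3://xxx/xxx/person.json")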

Can't get Service Alerts Protobuf to include header_text or description_text using Python gtfs_realtime_pb2 module

We are having difficulty adding header_text and description_text to a Service Alerts protobuf file. We are attempting to match the example shown on this page:
https://developers.google.com/transit/gtfs-realtime/examples/alerts
Our data starts in the following dictionary:
alerts_dict = {
    "header": {
        "gtfs_realtime_version": "1",
        "timestamp": "1543318671",
        "incrementality": "FULL_DATASET"
    },
    "entity": [{
        "497": {
            "active_period": [{
                "start": 1525320000,
                "end": 1546315200
            }],
            "url": "http://www.capmetro.org/planner",
            "effect": 4,
            "header_text": "South 183: Airport",
            "informed_entity": [{
                "route_type": "3",
                "route_id": "17",
                "trip": "",
                "stop_id": "3304"
            }, {
                "route_type": "3",
                "route_id": "350",
                "trip": "",
                "stop_id": "3304"
            }],
            "description_text": "Stop closed temporarily",
            "cause": 2
        },
        "460": {
            "active_period": [{
                "start": 1519876800,
                "end": 1546315200
            }],
            "url": "http://www.capmetro.org/planner",
            "effect": 4,
            "header_text": "Ave F / Duval Detour",
            "informed_entity": [{
                "route_type": "3",
                "route_id": "7",
                "trip": "",
                "stop_id": "1167"
            }, {
                "route_type": "3",
                "route_id": "7",
                "trip": "",
                "stop_id": "1268"
            }],
            "description_text": "Stop closed temporarily",
            "cause": 2
        }
    }]
}
Our Python code is as follows:
newfeed = gtfs_realtime_pb2.FeedMessage()
newfeedheader = newfeed.header
newfeedheader.gtfs_realtime_version = '2.0'

for alert_id, alert_dict in alerts_dict["entity"][0].iteritems():
    print(alert_id)
    print(alert_dict)
    newentity = newfeed.entity.add()
    newalert = newentity.alert
    newentity.id = str(alert_id)
    newtimerange = newalert.active_period.add()
    newtimerange.end = alert_dict['active_period'][0]['end']
    newtimerange.start = alert_dict['active_period'][0]['start']
    for informed in alert_dict['informed_entity']:
        newentityselector = newalert.informed_entity.add()
        newentityselector.route_id = informed['route_id']
        newentityselector.route_type = int(informed['route_type'])
        newentityselector.stop_id = informed['stop_id']
    print(alert_dict['description_text'])
    newdescription = newalert.header_text
    newdescription = alert_dict['description_text']
    newalert.cause = alert_dict['cause']
    newalert.effect = alert_dict['effect']

pb_feed = newfeed.SerializeToString()

with open("servicealerts.pb", 'wb') as fout:
    fout.write(pb_feed)
The frustrating part is that we don't receive any sort of error message. Everything appears to run properly but the resulting pb file doesn't contain the new header_text or description_text items.
We are able to read the pb file using the following code:
feed = gtfs_realtime_pb2.FeedMessage()
response = open("servicealerts.pb")
feed.ParseFromString(response.read())
print(feed)
We truly appreciate any help that anyone can offer in pointing us in the right direction of figuring this out.
I was able to find the answer. This Python Notebook showed that, by properly formatting the dictionary, the PB could be generated with a few lines of code.
from google.transit import gtfs_realtime_pb2
from google.protobuf.json_format import ParseDict

newfeed = gtfs_realtime_pb2.FeedMessage()
ParseDict(alerts_dict, newfeed)
pb_feed = newfeed.SerializeToString()
with open("servicealerts.pb", 'wb') as fout:
    fout.write(pb_feed)
All I had to do was format my dictionary properly.
if ALERT_GROUP_ID not in entity_dict.keys():
    entity_dict[ALERT_GROUP_ID] = {
        "id": ALERT_GROUP_ID,
        "alert": {
            "active_period": [{
                "start": int(START_TIME),
                "end": int(END_TIME)
            }],
            "cause": cause_dict.get(CAUSE, ""),
            "effect": effect_dict.get(EFFECT),
            "url": {
                "translation": [{
                    "text": URL,
                    "language": "en"
                }]
            },
            "header_text": {
                "translation": [{
                    "text": HEADER_TEXT,
                    "language": "en"
                }]
            },
            "informed_entity": [{
                'route_id': ROUTE_ID,
                'route_type': ROUTE_TYPE,
                'trip': TRIP,
                'stop_id': STOP_ID
            }],
            "description_text": {
                "translation": [{
                    "text": "Stop closed temporarily",
                    "language": "en"
                }]
            },
        },
    }
    # print(entity_dict[ALERT_GROUP_ID]["alert"]['informed_entity'])
else:
    entity_dict[ALERT_GROUP_ID]["alert"]['informed_entity'].append({
        'route_id': ROUTE_ID,
        'route_type': ROUTE_TYPE,
        'trip': TRIP,
        'stop_id': STOP_ID
    })
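To tie it together, here is a sketch (not from the original answer) of how an entity_dict built this way could be dropped into a full FeedMessage and serialized; the header values are illustrative:

from google.transit import gtfs_realtime_pb2
from google.protobuf.json_format import ParseDict

# The entity_dict values above already match the FeedEntity JSON shape,
# so the whole feed can be assembled as one dictionary and parsed in one call.
feed_dict = {
    "header": {
        "gtfs_realtime_version": "2.0",
        "incrementality": "FULL_DATASET",
        "timestamp": 1543318671
    },
    "entity": list(entity_dict.values())
}

newfeed = gtfs_realtime_pb2.FeedMessage()
ParseDict(feed_dict, newfeed)  # raises ParseError if a field name or type doesn't match the schema

with open("servicealerts.pb", "wb") as fout:
    fout.write(newfeed.SerializeToString())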