I'm trying to migrate a utility (currently broken because of breaking changes) that solves a .mps file using IBM's APIs.
The original code uses an empty model.tar.gz file, creates a deployment, and passes the .mps file to a new job.
The (Python) code looks like this:
import tarfile
tar = tarfile.open("model.tar.gz", "w:gz")
tar.close()
test_metadata = {
client.repository.ModelMetaNames.NAME: "Test",
client.repository.ModelMetaNames.DESCRIPTION: "Model for Test",
client.repository.ModelMetaNames.TYPE: "do-cplex_12.9",
client.repository.ModelMetaNames.RUNTIME_UID: "do_12.9"
}
model_details = client.repository.store_model(model='model.tar.gz', meta_props=test_metadata)
model_uid = client.repository.get_model_uid(model_details)
n_nodes = 1
meta_props = {
client.deployments.ConfigurationMetaNames.NAME: "Test Deployment " + str(n_nodes),
client.deployments.ConfigurationMetaNames.DESCRIPTION: "Test Deployment",
client.deployments.ConfigurationMetaNames.BATCH: {},
client.deployments.ConfigurationMetaNames.COMPUTE: {'name': 'S', 'nodes': n_nodes}
}
deployment_details = client.deployments.create(model_uid, meta_props=meta_props)
deployment_uid = client.deployments.get_uid(deployment_details)
solve_payload = {
client.deployments.DecisionOptimizationMetaNames.SOLVE_PARAMETERS: {
'oaas.logAttachmentName':'log.txt',
'oaas.logTailEnabled':'true',
'oaas.resultsFormat': 'JSON'
},
client.deployments.DecisionOptimizationMetaNames.INPUT_DATA_REFERENCES: [
{
'id':'test.mps',
'type': 's3',
'connection': {
'endpoint_url': COS_ENDPOINT,
'access_key_id': cos_credentials['cos_hmac_keys']["access_key_id"],
'secret_access_key': cos_credentials['cos_hmac_keys']["secret_access_key"]
},
'location': {
'bucket': COS_BUCKET,
'path': 'test.mps'
}
}
],
client.deployments.DecisionOptimizationMetaNames.OUTPUT_DATA_REFERENCES: [
{
'id':'solution.json',
'type': 's3',
'connection': {
'endpoint_url': COS_ENDPOINT,
'access_key_id': cos_credentials['cos_hmac_keys']["access_key_id"],
'secret_access_key': cos_credentials['cos_hmac_keys']["secret_access_key"]
},
'location': {
'bucket': COS_BUCKET,
'path': 'solution.json'
}
},
{
'id':'log.txt',
'type': 's3',
'connection': {
'endpoint_url': COS_ENDPOINT,
'access_key_id': cos_credentials['cos_hmac_keys']["access_key_id"],
'secret_access_key': cos_credentials['cos_hmac_keys']["secret_access_key"]
},
'location': {
'bucket': COS_BUCKET,
'path': 'log.txt'
}
}
]
}
job_details = client.deployments.create_job(deployment_uid, solve_payload)
The closest I've managed to get (which is almost exactly what I need) is to use most of the code from this example:
https://github.com/IBM/watson-machine-learning-samples/blob/master/cloud/notebooks/python_sdk/deployments/decision_optimization/Use%20Decision%20Optimization%20to%20plan%20your%20diet.ipynb
Here is a full working sample.
from ibm_watson_machine_learning import APIClient
import os
import wget
import json
import pandas as pd
import time
COS_ENDPOINT = "https://s3.ams03.cloud-object-storage.appdomain.cloud"
model_path = 'do-model.tar.gz'
api_key = 'XXXXX'
access_key_id = "XXXX",
secret_access_key= "XXXX"
location = 'eu-gb'
space_id = 'XXXX'
softwareSpecificationName = "do_12.9"
modelType = "do-docplex_12.9"
wml_credentials = {
"apikey": api_key,
"url": 'https://' + location + '.ml.cloud.ibm.com'
}
client = APIClient(wml_credentials)
client.set.default_space(space_id)
if not os.path.isfile(model_path):
    wget.download("https://github.com/IBM/watson-machine-learning-samples/raw/master/cloud/models/decision_optimization/do-model.tar.gz")
software_spec_uid = client.software_specifications.get_uid_by_name(softwareSpecificationName)
model_meta_props = {
client.repository.ModelMetaNames.NAME: "LOCALLY created DO model",
client.repository.ModelMetaNames.TYPE: modelType,
client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: software_spec_uid
}
published_model = client.repository.store_model(model=model_path, meta_props=model_meta_props)
time.sleep(5)  # So that the model is available on the API
published_model_uid = client.repository.get_model_uid(published_model)
client.repository.list_models()
meta_data = {
client.deployments.ConfigurationMetaNames.NAME: "deployment_DO",
client.deployments.ConfigurationMetaNames.BATCH: {},
client.deployments.ConfigurationMetaNames.HARDWARE_SPEC: {"name": "S", "num_nodes": 1}
}
deployment_details = client.deployments.create(published_model_uid, meta_props=meta_data)
time.sleep(5)  # So that the deployment is available on the API
deployment_uid = client.deployments.get_uid(deployment_details)
client.deployments.list()
job_payload_ref = {
client.deployments.DecisionOptimizationMetaNames.INPUT_DATA_REFERENCES: [
{
'id':'diet_food.csv',
'type': 's3',
'connection': {
'endpoint_url': COS_ENDPOINT,
'access_key_id': access_key_id,
'secret_access_key': secret_access_key
},
'location': {
'bucket': "gvbucketname0api",
'path': "diet_food.csv"
}
},
{
'id':'diet_food_nutrients.csv',
'type': 's3',
'connection': {
'endpoint_url': COS_ENDPOINT,
'access_key_id': access_key_id,
'secret_access_key': secret_access_key
},
'location': {
'bucket': "gvbucketname0api",
'path': "diet_food_nutrients.csv"
}
},
{
'id':'diet_nutrients.csv',
'type': 's3',
'connection': {
'endpoint_url': COS_ENDPOINT,
'access_key_id': access_key_id,
'secret_access_key': secret_access_key
},
'location': {
'bucket': "gvbucketname0api",
'path': "diet_nutrients.csv"
}
}
],
client.deployments.DecisionOptimizationMetaNames.OUTPUT_DATA_REFERENCES:
[
{
'id':'.*',
'type': 's3',
'connection': {
'endpoint_url': COS_ENDPOINT,
'access_key_id': access_key_id,
'secret_access_key':secret_access_key
},
'location': {
'bucket': "gvbucketname0api",
'path': "${job_id}/${attachment_name}"
}
}
]
}
job = client.deployments.create_job(deployment_uid, meta_props=job_payload_ref)
The above example uses a model and a few CSV files as input.
When I change the INPUT_DATA_REFERENCES to use a .mps file (and an empty model), I get an error:
"errors": [
{
"code": "invalid_model_archive_in_deployment",
"message": "Invalid or unrecognized archive type in deployment `XXX-XXX-XXX`.
Supported archive types are `zip` or `tar.gz`"
}
]
I'm not an expert, but from what I understand the .mps file contains both the input and the model, so I shouldn't have to provide both.
The answer was provided by Alex Fleischer on another forum.
A full example can be found here:
https://medium.com/@AlainChabrier/solve-lp-problems-from-do-experiments-9afd4d53aaf5
The above link (which is similar to the code in my question) shows an example with a ".lp" file, but it's exactly the same for a ".mps" file too.
(Note that the model type is do-cplex_12.10, not do-docplex_12.10.)
My problem was that I was using an empty model.tar.gz file.
Once you have the .lp/.mps file in the archive, everything works as expected.
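For anyone hitting the same issue, here is a minimal sketch of how the archive can be packaged so it is no longer empty. It assumes the APIClient (client) is already created and the space set, as in the working sample above; the software specification name "do_12.10" is an assumption picked to match the do-cplex_12.10 model type, so check client.software_specifications.list() for the exact name in your instance.

import tarfile

# Put the actual .mps model inside the archive instead of uploading an empty one.
# "test.mps" is assumed to be in the current working directory.
with tarfile.open("model.tar.gz", "w:gz") as tar:
    tar.add("test.mps", arcname="test.mps")

# Store it with a do-cplex (not do-docplex) model type, as noted above.
software_spec_uid = client.software_specifications.get_uid_by_name("do_12.10")  # assumed spec name
model_metadata = {
    client.repository.ModelMetaNames.NAME: "Test",
    client.repository.ModelMetaNames.TYPE: "do-cplex_12.10",
    client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: software_spec_uid
}
model_details = client.repository.store_model(model="model.tar.gz", meta_props=model_metadata)

The deployment and create_job calls from the working sample can then be reused unchanged.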
I have a problem where I want to update a document if it exists or, if it does not, create a new one.
For example, I have data in a database:
[
{
"unix": 1668380400,
"type": "soup",
"order": 1,
"value": "Chicken"
},
{
"unix": 1668380400,
"type": "main",
"order": 0,
"value": "Gulash"
},
{
"unix": 1668553200,
"type": "soup",
"order": 0,
"value": "Asian"
}
]
I want to get to the point where, when unix, type, and order are the same, the value is modified; but if an element with the same unix, order, and type is not found in the database, a completely new record is added to the db.
I thought this was how I would achieve the desired state, but it doesn't work:
router.post("/add", async (req, res) => {
const data = req.body;
await data.map((el) => {
const { unix, type, order, value } = el;
Menu.findOneAndUpdate(
{ unix, type, order },
{ unix, type, order, value },
{ new: true, upsert: true }
);
});
res.status(201).json({ status: true });
});
req.body:
[
{
"unix": 1668380400,
"type": "main",
"order": 2,
"value": "Sushi"
},
{
"unix": 1668553200,
"type": "soup",
"order": 0,
"value": "Thai"
}
]
Thanks for any help.
I think I found a solution. Everything works as it should, but wouldn't it be better to send the data with JSON.stringify() and then parse it on the server using JSON.parse()?
Another thing is the map method. Is it OK like this? Couldn't it cause throttling?
router.post("/add", (req, res) => {
const data = req.body;
data.map(async (el) => {
const { unix, type, order, value } = el;
await Menu.findOneAndUpdate(
{ unix, type, order },
{ value },
{ upsert: true }
);
});
res.status(201).json({ status: true });
});
I am already accessing one class from my model class; now I intend to access a different one from the same model class.
Context: I am already accessing "field_value" under "Field", which is under "ListNode", and now I want to access the "run_hyperlink" that sits directly under "ListNode", so that I can associate my list tiles with their "run_hyperlink" accordingly.
Right now the code to extract the "run_hyperlink" is returning null for some reason.
This is the full code:
() async {
var newMessage = await (ReadCache.getString(key: 'cache1'));
var response = await http.get(
Uri.parse(
'http://192.168.1.4:8080/HongLeong/MENU_REQUEST.do?_dc=1658076505340&reset_context=1&table_id=25510&id_MenuAction=3&path=%3Cp%20class%3D%22x-window-header-path%22%3ELoss%20Event%3C%2Fp%3E&ViewType=MENU_REQUEST&gui_open_popup=1&id_Window=17&activeWindowId=mw_17&noOrigUserDate=true&LocalDate=20220718&LocalTime=00482500&TimeZone=Asia%2FShanghai&UserDate=0&UserTime=0&server_name=OPRISK_DATACOLLECTOR&key_id_list=&cell_context_id=0&id_Desktop=100252&operation_key=1000184&operation_sub_num=-1&is_json=1&is_popup=0&is_search_window=0&ccsfw_conf_by_user=0&is_batch=0&previousToken=1658069547560&historyToken=1658076505339&historyUrl=1'),
headers: {HttpHeaders.cookieHeader: newMessage},
);
LossEventResponseModel lossEventResponseModel =
LossEventResponseModel.fromJson(jsonDecode(response.body));
final listNode = lossEventResponseModel.response.genericListAnswer.listNode;
List<Map<String, dynamic>> incidentList = [
for (final json in listNode.map((x) => x.toJson()))
{
'Code': json['field'][0]['field_value'],
'Description': json['field'][1]['field_value'],
'Organisation Unit': json['field'][46]['field_value'],
'Date Reported': json['field'][18]['field_value'],
'Status': json['field'][4]['field_value'],
}
];
final List<String> values = [];
for(final item in incidentList){
values.add(item.values.map((e) => e.toString()).join("\n"));
}
await WriteCache.setListString(key: 'cache4', value: values);
RunHyperlink runHyperLink =
RunHyperlink.fromJson(jsonDecode(response.body));
final madKey = runHyperLink.madKey;
print(madKey);
Navigator.of(context).push(
MaterialPageRoute(builder: (context) => LossEvent()));
}
This is what I am displaying right now:
How do I extract the "run_hyperlink" and link it to its corresponding ListTile?
This is one entry; the fields are displayed as shown, and the "run_hyperlink" would preferably become the tile's ID, in a sense:
This is the full HTTP response, by the way:
{
"RESPONSE":{
"GenericListAnswer":{
"empty_top_toolbar":"0",
"no_move_columns":"0",
"no_add_rem_columns":"0",
"no_sort_columns":"0",
"no_resize_columns":"0",
"no_oper_columns":"0",
"refresh_data":{
"ApplEra":"",
"delay":"0"
},
"no_gui_link":"0",
"index_last":"10742",
"max_num_rows":"0",
"index_true_last":"10742",
"is_history":"0",
"index_first_visible":"1",
"display_table_name":"Loss Event",
"table_id":"25510",
"table_name":"LossEvent",
"global_operation":[],
"num_visible":"200",
"list_type":"0",
"list_key":"1",
"parent_Class_id":"0",
"parent_common_id":"0",
"parent_applcol_id":"0",
"search_num":"0",
"id_box_search_num":"0",
"mad_key_list":"",
"choose_xsl":"0",
"is_search_popup":"0",
"section_list_params":"",
"saved_query":{
"query":[
{
"id":"184",
"name":"Loss Event",
"note":"",
"is_batch":"0",
"is_validated":"1",
"show_out_combo":"0",
"sort_key":"0",
"custom_gif":"",
"description":"Loss Event",
"may_have_checkbox":"0",
"is_active":"0",
"qtype":"2",
"may_be_edited":"1",
"pinnability":"2"
},
{
"id":"187",
"name":"Loss Event Days Open Tracking Report",
"note":"",
"is_batch":"0",
"is_validated":"1",
"show_out_combo":"0",
"sort_key":"0",
"custom_gif":"",
"description":"Loss Event Days Open Tracking Report",
"may_have_checkbox":"0",
"is_active":"0",
"qtype":"2",
"may_be_edited":"1",
"pinnability":"2"
},
{
"id":"185",
"name":"Loss Event 1",
"note":"",
"is_batch":"0",
"is_validated":"1",
"show_out_combo":"0",
"sort_key":"0",
"custom_gif":"",
"description":"Loss Event 1",
"may_have_checkbox":"0",
"is_active":"0",
"qtype":"2",
"may_be_edited":"1",
"pinnability":"2"
},
{
"id":"279",
"name":"T Loss Event Status summary",
"note":"",
"is_batch":"0",
"is_validated":"1",
"show_out_combo":"0",
"sort_key":"0",
"custom_gif":"",
"description":"T Loss Event Status summary",
"may_have_checkbox":"0",
"is_active":"0",
"qtype":"2",
"may_be_edited":"1",
"pinnability":"2"
}
]
},
"QuickSearch":{
"quick_search_column":[
"Title",
"Tajuk",
"Code"
]
},
"report_jasper_list":{
"report_jasper":[
{
"display_name":"LE002 - Loss Event Status Summary",
"id":"15",
"mad_key":"15",
"is_param":"1",
"report_options":"",
"reportForbiddenFormat":[
"2",
"4",
"5",
"6"
]
},
{
"display_name":"LE003 - Loss Event Days Open Tracking Report",
"id":"5",
"mad_key":"5",
"is_param":"1",
"report_options":"",
"reportForbiddenFormat":[
"2",
"4",
"5",
"6"
]
},
{
"display_name":"LE01 - Loss Event Data",
"id":"2",
"mad_key":"2",
"is_param":"1",
"report_options":"",
"reportForbiddenFormat":[
"2",
"4",
"5",
"6"
]
}
]
},
"wf_configured":"1",
"op_parameter":"",
"ListNode":[
{
"id":"2",
"mad_key":"32835",
"is_custom":"0",
"is_locked":"0",
"is_inactive":"1",
"run_hyperlink":{
"classid":"25510",
"id":"2",
"mad_key":"32835"
},
"field":[
{
"field_name":"code",
"col_index":"1",
"field_value":"LE-0000000002",
"mad_key":"0",
"id":"0"
},
{
"field_name":"common_desc_0",
"col_index":"2",
"field_value":"test_01",
"mad_key":"0",
"id":"0"
},
{
"field_name":"id_Org",
"col_index":"3",
"field_value":"01_01_04_01_SA - Shah Alam",
"mad_key":"100377",
"id":"100055"
},
{
"field_name":"dateReported",
"col_index":"4",
"field_value":"18/09/2020",
"mad_key":"0",
"id":"0"
}
]
},
{
"id":"3",
"mad_key":"32836",
"is_custom":"0",
"is_locked":"0",
"is_inactive":"0",
"run_hyperlink":{
"classid":"25510",
"id":"3",
"mad_key":"32836"
},
"field":[
{
"field_name":"code",
"col_index":"1",
"field_value":"LE-0000000003",
"mad_key":"0",
"id":"0"
},
{
"field_name":"common_desc_0",
"col_index":"2",
"field_value":"Transactions not reported (intentional)",
"mad_key":"0",
"id":"0"
},
{
"field_name":"id_Org",
"col_index":"3",
"field_value":"01_01_04_01_HQ - Menara Hong Leong, Damansara City",
"mad_key":"100451",
"id":"100069"
},
{
"field_name":"dateReported",
"col_index":"4",
"field_value":"03/02/2018",
"mad_key":"0",
"id":"0"
}
]
},
{
"id":"22",
"mad_key":"20234",
"is_custom":"0",
"is_locked":"0",
"is_inactive":"0",
"run_hyperlink":{
"classid":"25510",
"id":"22",
"mad_key":"20234"
},
"field":[
{
"field_name":"code",
"col_index":"1",
"field_value":"LE-0000000022",
"mad_key":"0",
"id":"0"
},
{
"field_name":"common_desc_0",
"col_index":"2",
"field_value":"Transaction type unauthorised",
"mad_key":"0",
"id":"0"
},
{
"field_name":"id_Org",
"col_index":"3",
"field_value":"01_01_04_01_HQ - Menara Hong Leong, Damansara City",
"mad_key":"100451",
"id":"100069"
},
{
"field_name":"dateReported",
"col_index":"4",
"field_value":"04/02/2018",
"mad_key":"0",
"id":"0"
}
]
},
"BASEL_RESPONSE":{
"UserDate":"0",
"UserTime":"0",
"CurrentTimeStamp":"26/07/2022 21:24:57 +0800",
"UserName":"Administrator",
"module_config_1":"0",
"module_config_2":"0",
"ErrEntity":{
"MessageID":"0",
"last_req_id":"50033",
"table_id":"25510",
"key_id_list":"536901,536902,536905,536909,536910,536913",
"operation_id":"0"
},
"is_csv":"0",
"VersionName":"DYMA # 6.1.24.0, ORG # 2017.3.22.15.0.41, GRC # 2017.3.22.15.0.55, LDC # 2017.3.22.15.1.8, DYMA_XML # 2017.3.22.15.0.30, NAS # 2017.3.22.15.1.22 - Config: 0 - Node: OPRISK_DATACOLLECTOR",
"ExpiryDate":"31/01/2030",
"count_key":"2",
"id_Us":"1",
"is_popup":"0",
"tot_messages":"0",
"my_messages":"0",
"product":"0"
},
"RESPONSE_HEADER":{
"SessionID":"qNQzbCTdM1821510940CzbeWQ0001",
"NomeRichiesta":"MENU_REQUEST",
"ltimeStart":"21245695",
"ltimeStop":"21250041",
"ldate_null":"19900101",
"product":"1",
"server_name":"OPRISK_DATACOLLECTOR",
"cell_context_id":"538044",
"operation_key":"100",
"operation_sub_num":"-1"
}
}
}
I have a nested document structure and I am able to filter it with pluck to show the relevant parts:
Is there an elegant way to merge all entries of the last level into a single array?
Expected result (entries are not unique on purpose):
[
'3425b91f-f019-4db3-ad56-c336bf55279b',
'3d07946e-183d-4992-9acd-676f5122e1b1',
'3425b91f-f019-4db3-ad56-c336bf55279b',
'3d07946e-183d-4992-9acd-676f5122e1b1',
'2cd652a6-4dcd-4920-9592-d4cdc5a034bf',
'70fe1812-e1de-447b-ac4f-d89fead4756d',
'2cd652a6-4dcd-4920-9592-d4cdc5a034bf',
'70fe1812-e1de-447b-ac4f-d89fead4756d'
]
I tried to use
r.table('periods')['regions']['sites']['plants']['product']['process']['technologies'].run()
but it gives the error "Cannot perform bracket on a sequence of sequences".
=> Is there some alternative operator to get a merged sequence instead of a "sequence of sequences" for each step?
Something like
r.table('periods').unwind('regions.sites.plants.product.process.technologies')
Here is some Python code to create the example data:
from rethinkdb import RethinkDB
r = RethinkDB()
r.connect({}).repl()
r.table_create("periods")
def uniqueid():
    return r.uuid().run()
periodid_first = uniqueid()
periodid_second = uniqueid()
companyid_2000 = uniqueid()
companyid_2001 = uniqueid()
technologyid_2000_first = uniqueid()
technologyid_2000_second = uniqueid()
technologyid_2001_first = uniqueid()
technologyid_2001_second = uniqueid()
energy_carrierid_2000_first = uniqueid()
energy_carrierid_2000_second = uniqueid()
energy_carrierid_2001_first = uniqueid()
energy_carrierid_2001_second = uniqueid()
periods = [
{
'id': periodid_first,
'start': 2000,
'end': 2000,
# 'sub_periods': [],
'regions': [
{
'id': 'DE',
# 'sub_regions': [],
'sites': [
{
'id': 'first_site_in_germany',
'company': companyid_2000, # => refers to periods => companies
'plants': [
{
'id': 'qux',
'product': {
'id': 'Ammoniak',
'process': {
'id': 'SMR+HB',
'technologies': [
technologyid_2000_first, # => refers to periods => technologies
technologyid_2000_second
]
}
}
}
]
}
]
},
{
'id': 'FR',
# 'sub_regions': [],
'sites': [
{
'id': 'first_site_in_france',
'company': companyid_2000, # => refers to periods => companies
'plants': [
{
'id': 'qux',
'product': {
'id': 'Ammoniak',
'process': {
'id': 'SMR+HB',
'technologies': [
technologyid_2000_first, # => refers to periods => technologies
technologyid_2000_second
]
}
}
}
]
}
]
}
],
'companies': [
{
'id': companyid_2000,
'name': 'international_company'
}
],
'technologies': [
{
'id': technologyid_2000_first,
'name': 'SMR',
'specific_cost_per_year': 123,
'specific_energy_consumptions': [
{
'energy_carrier': energy_carrierid_2000_first,
'specific_consumption': 5555
}, # => refers to periods => energy_carriers
{
'energy_carrier': energy_carrierid_2000_second,
'energy_consumption': 2333
}
]
},
{
'id': technologyid_2000_second,
'name': 'HB',
'specific_cost_per_year': 1234,
'specific_energy_consumptions': [
{
'energy_carrier': energy_carrierid_2000_first,
'specific_consumption': 555
}, # => refers to periods => energy_carriers
{
'energy_carrier': energy_carrierid_2000_second,
'energy_consumption': 233
}
]
}
],
'energy_carriers': [
{
'id': energy_carrierid_2000_first,
'name': 'oil',
'group': 'fuel'
},
{
'id': energy_carrierid_2000_second,
'name': 'gas',
'group': 'fuel'
},
{
'id': uniqueid(),
'name': 'conventional',
'group': 'electricity'
},
{
'id': uniqueid(),
'name': 'green',
'group': 'electricity'
}
],
'networks': [
{
'id': uniqueid(),
'name': 'gas',
'sub_networks': [],
'pipelines': [
]
},
{
'id': uniqueid(),
'name': 'gas',
'sub_networks': [],
'pipelines': [
]
}
]
},
{
'id': periodid_second,
'start': 2001,
'end': 2001,
# 'sub_periods': [],
'regions': [
{
'id': 'DE',
# 'sub_regions': [],
'sites': [
{
'id': 'first_site_in_germany',
'company': companyid_2001, # => refers to periods => companies
'plants': [
{
'id': 'qux',
'product': {
'id': 'Ammoniak',
'process': {
'id': 'SMR+HB',
'technologies': [
technologyid_2001_first, # => refers to periods => technologies
technologyid_2001_second
]
}
}
}
]
}
]
},
{
'id': 'FR',
# 'sub_regions': [],
'sites': [
{
'id': 'first_site_in_france',
'company': companyid_2001, # => refers to periods => companies
'plants': [
{
'id': 'qux',
'product': {
'id': 'Ammoniak',
'process': {
'id': 'SMR+HB',
'technologies': [
technologyid_2001_first, # => refers to periods => technologies
technologyid_2001_second
]
}
}
}
]
}
]
}
],
'companies': [
{
'id': companyid_2001,
'name': 'international_company'
}
],
'technologies': [
{
'id': technologyid_2001_first,
'name': 'SMR',
'specific_cost_per_year': 123,
'specific_energy_consumptions': [
{
'energy_carrier': energy_carrierid_2001_first,
'specific_consumption': 5555
}, # => refers to periods => energy_carriers
{
'energy_carrier': energy_carrierid_2001_second,
'energy_consumption': 2333
}
]
},
{
'id': technologyid_2001_second,
'name': 'HB',
'specific_cost_per_year': 1234,
'specific_energy_consumptions': [
{
'energy_carrier': energy_carrierid_2001_first,
'specific_consumption': 555
}, # => refers to periods => energy_carriers
{
'energy_carrier': energy_carrierid_2001_second,
'energy_consumption': 233
}
]
}
],
'energy_carriers': [
{
'id': energy_carrierid_2001_first,
'name': 'oil',
'group': 'fuel'
},
{
'id': energy_carrierid_2001_second,
'name': 'gas',
'group': 'fuel'
},
{
'id': uniqueid(),
'name': 'conventional',
'group': 'electricity'
},
{
'id': uniqueid(),
'name': 'green',
'group': 'electricity'
}
],
'networks': [
{
'id': uniqueid(),
'name': 'gas',
'sub_networks': [],
'pipelines': [
]
},
{
'id': uniqueid(),
'name': 'gas',
'sub_networks': [],
'pipelines': [
]
}
]
}
]
r.table('periods') \
.insert(periods) \
.run()
Related:
RethinkDB: RqlRuntimeError: Cannot perform bracket on a sequence of sequences
A nested concat_map in combination with the r.row operator and bracket drill-down does the trick:
r.table('periods') \
.concat_map(r.row['regions']) \
.concat_map(r.row['sites']) \
.concat_map(r.row['plants'])['product']['process'] \
.concat_map(r.row['technologies']) \
.run()
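As a side note, the same drill-down can also be written with explicit lambdas instead of r.row. This is only a sketch of an equivalent formulation (it assumes the connection opened with .repl() in the setup code above is still in place), not a different operator:

# Each concat_map flattens one level of nesting; the last one maps every plant
# directly to its 'technologies' list, so the result is a single merged sequence.
technology_ids = r.table('periods') \
    .concat_map(lambda period: period['regions']) \
    .concat_map(lambda region: region['sites']) \
    .concat_map(lambda site: site['plants']) \
    .concat_map(lambda plant: plant['product']['process']['technologies']) \
    .run()
print(list(technology_ids))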
I have the following list of maps:
[
{
"FullName":"Harry Potter",
"DateOfBirth": "2020/02/16",
"Department":"Branch Operation",
"BirthDay":"Friday"
},
{
"FullName":"John Wick",
"DateOfBirth": "2020/02/16",
"Department":"Finance",
"BirthDay":"Friday"
},
{
"FullName":"Solomon Kane",
"DateOfBirth":2020/02/19,
"Department":"Loan",
"BirthDay":"Monday"
}
]
I would like to manipulate the above data so that it is grouped by DateOfBirth, with the result looking like this:
[
{
"DateOfBirth": "2020/02/16",
"BirthDay": "Friday",
"Data":[
{
"FullName": "Harry Potter",
"Department":"Branch Operation",
},
{
"FullName":"John Wick",
"Department":"Finance",
}
]
},
{
"DateOfBirth": "2020/02/19",
"BirthDay": "Monday",
"Data":[
{
"FullName":"Solomon Kane",
"Department":"Loan"
}
]
},
]
In JavaScript, this can be achieved by using the reduce function and then using object key mapping.
I also know Dart has a useful package called collection.
As I am new to Dart and Flutter, I am not sure how to do this. Can anybody help me with it?
Thanks.
You could use fold and do something like this:
const data = [...];

void main() {
  final value = data
      .fold(Map<String, List<dynamic>>(), (Map<String, List<dynamic>> a, b) {
        a.putIfAbsent(b['DateOfBirth'], () => []).add(b);
        return a;
      })
      .values
      .where((l) => l.isNotEmpty)
      .map((l) => {
            'DateOfBirth': l.first['DateOfBirth'],
            'BirthDay': l.first['BirthDay'],
            'Data': l.map((e) => {
                  'Department': e['Department'],
                  'FullName': e['FullName'],
                }).toList()
          })
      .toList();
}
Or like this if you want to use the spread operator, though I don't know if it's very readable:
final result = data
    .fold({}, (a, b) => {
          ...a,
          b['DateOfBirth']: [b, ...?a[b['DateOfBirth']]],
        })
    .values
    .where((l) => l.isNotEmpty)
    .map((l) => {
          'DateOfBirth': l.first['DateOfBirth'],
          'BirthDay': l.first['BirthDay'],
          'Data': l.map((e) => {
                'Department': e['Department'],
                'FullName': e['FullName'],
              }).toList()
        })
    .toList();
I want to auto-increment the MongoDB document number using LoopBack.
I made this function in Mongo:
function getNextSequence(name) {
var ret = db.counters.findAndModify(
{
query: { _id: name },
update: { $inc: { seq: 1 } },
new: true
}
);
return ret.seq;
}
db.tweet.insert(
{
"_id" : getNextSequence("userid"),
"content": "test",
"date": "1",
"ownerUsername": "1",
"ownerId": "1"
}
)
It works in the mongo shell.
However, when I insert using the LoopBack explorer in the browser (http://localhost:3000/explorer/), it does not work; a 400 (SyntaxError) code is shown.
Can I not use a Mongo function in the LoopBack REST API?
I think the problem is the quotes in this line: getNextSequence("userid"),
Create a collection counters with the properties value and collection:
{
  "name": "counters",
  "base": "PersistedModel",
  "idInjection": true,
  "options": {
    "validateUpsert": true
  },
  "properties": {
    "value": "number",
    "collection": "string"
  },
  "validations": [],
  "relations": {},
  "acls": [
    {
      "accessType": "*",
      "principalType": "ROLE",
      "principalId": "$everyone",
      "permission": "ALLOW"
    }
  ],
  "methods": []
}
Now suppose your auto-increment collection is named tweet.
Insert this value into counters:
{
"value" : 0,
"collection" : "tweet"
}
Now in common/models/tweet.js:
tweet.observe('before save', function (ctx, next) {
  var app = ctx.Model.app;
  // Apply this hook for the save operation only.
  if (ctx.isNewInstance) {
    // Suppose my datasource name is mongodb
    var mongoDb = app.dataSources.mongodb;
    var mongoConnector = app.dataSources.mongodb.connector;
    mongoConnector.collection("counters").findAndModify(
      { collection: 'tweet' },
      [['_id', 'asc']],
      { $inc: { value: 1 } },
      { new: true },
      function (err, sequence) {
        if (err) {
          throw err;
        } else {
          // Do what I need to do with the new incremented value, sequence.value
          // Save the tweet id with the auto-incremented value.
          ctx.instance.id = sequence.value.value;
          next();
        } // else
      }
    );
  } // ctx.isNewInstance
  else {
    next();
  }
}); // Observe before save.
I would love to add one more point to Robin's answer: you can add upsert: true so that the document is created automatically if it doesn't exist.
tweet.observe('before save', function (ctx, next) {
  var app = ctx.Model.app;
  // Apply this hook for the save operation only.
  if (ctx.isNewInstance) {
    // Suppose my datasource name is mongodb
    var mongoDb = app.dataSources.mongodb;
    var mongoConnector = app.dataSources.mongodb.connector;
    mongoConnector.collection("counters").findAndModify(
      { collection: 'tweet' },
      [['_id', 'asc']],
      { $inc: { value: 1 } },
      { new: true, upsert: true },
      function (err, sequence) {
        if (err) {
          throw err;
        } else {
          // Do what I need to do with the new incremented value, sequence.value
          // Save the tweet id with the auto-incremented value.
          ctx.instance.id = sequence.value.value;
          next();
        } // else
      }
    );
  } // ctx.isNewInstance
  else {
    next();
  }
}); // Observe before save.
You can do something like this example for LoopBack 4:
let last_record = await this.testRepository.findOne({order: ['id DESC']});
if(last_record) invoice.id = last_record.id+1;
This will generate your model with the property:
@property({
  type: 'number',
  id: true,
  default: 1,
  generated: false
})
id: number;
Hopefully this helps; please write me if you need any other code. Thanks.
If you want to use MongoDB operators directly in LoopBack methods, you need to enable the option "allowExtendedOperators". You can do so on a per-model basis or at the data source level (which will apply to all models using that data source).
datasources.json:
"MongoDs": {
"host": "127.0.0.1",
"port": 27017,
"url": "mongodb://localUser:MYPASSWORD!#127.0.0.1:27017/test-database",
"database": "test-database",
"password": "MYPASSWORD!",
"name": "MongoDs",
"user": "localUser",
"useNewUrlParser": true,
"connector": "mongodb",
"allowExtendedOperators": true
},