How to get different values selected on selectInput - select

I want to load my CSV data (appended to this question) into this application. What I'm aiming for is to make sure the two selectInput controls always hold different values, whatever the user selects. The problem with my code is that the first selectInput stays stuck at its initial value when it should be changeable, and its current value should be excluded from the second selectInput's choices at all times.
ui <- fluidPage(
  fileInput('file1', 'Upload your CSV File'),
  htmlOutput("variables"),
  htmlOutput("facteurs"),
  uiOutput("tb")
)

server <- function(input, output, session) {
  myData1 <- reactive({
    inFile <- input$file1
    if (is.null(inFile)) return(NULL)
    data <- read.csv(inFile$datapath, header = TRUE, row.names = 1)
    data
  })
  output$variables <- renderUI({
    df_init <- myData1()
    x <- sapply(df_init, class) == "numeric"
    df <- df_init[, x]
    if (identical(df, '') || identical(df, data.frame())) return(NULL)
    selectInput(inputId = "V1", label = "Variables to use: Y",
                choices = names(df), selected = names(df)[1])
  })
  output$facteurs <- renderUI({
    df_init <- myData1()
    x <- sapply(df_init, class) == "factor"
    df <- df_init[, x]
    if (identical(df, '') || identical(df, data.frame())) return(NULL)
    verticalLayout(
      selectInput(inputId = "F11", label = "Factors to use: X1",
                  choices = names(df)),
      selectInput(inputId = "F12", label = "Factors to use: X2",
                  choices = names(df)[names(df) != input$F11])
    )
  })
}

shinyApp(ui = ui, server = server)
The data is
,Var,Lo,ES,Acidity,K232,K270,IP,OS,C 14:0,C 16:0,C 16:1,C 17:0,C 17:1,C 18:0,C 18:1,C 18:2,C 18:3,C 20:0,C 20:1,total,LLL,LnLO,LnLP,LLO,LnOO,PLL,LOO,LOP,PLP,OOO,POP,POO,AOL,SOO,SOP,Chlorophyll,b carotène,polyphenols ,Ethyl acetate,2- Methyl butanal,3- Methyl butanal,1-Penten-3-one,3-Hexanone,Hexanal,3-Pentanol,Trans-2-pentenal,1-Penten-3-ol,Cis-3-hexenal,Trans-2-hexenal,1-Pentanol,Hexyl acetate,Cis-3-hexenyl acetate,Cis-2-pentenol,6-Methyl-5-hepten-2-one,1-Hexanol,Trans-3-hexenol,Cis-3-hexenol,Trans-2-hexenol,Acetic acid,Butyric acid,H- Tyr ,Tyr ,DFOA,DFLA,Ac-Pin,Pin,EAA,OA,LA,total phenols (HPLC)
P1,chetoui,beja,sp,0.93,1.49,0.2,9.2,16.51,0.01,13.5,0.57,0.07,0.08,2.57,67.04,18.56,1.16,0.02,0.39,103.95,0.72,0.45,0.1,7.87,2.63,0.38,22.14,8.8,0.7,33.47,15.84,1.77,0.37,3.95,0.8,5.05,4.1,491.6,2.72,0.29,0.11,0.08,0.1,15.27,1.35,1.55,3.77,22.68,133.13,0.36,9.92,7.14,2.37,0.5,10.03,0.78,121.11,14.12,0.05,0.08,1.91,4.15,10.33,40.52,1.21,5.5,2.92,30.65,2.35,99.53
P2,chetoui,beja,sp,0.36,1.24,0.2,8.2,16.81,0.01,13.39,0.18,0.05,0.07,1.23,69.23,18.63,0.91,0.02,0.28,104,0.69,0.43,0.15,7.81,2.57,0.42,22.21,8.84,0.71,33.87,15.6,2.01,0.38,4.14,1.01,5.88,6.161,457.04,2.52,0.34,0.12,0.09,0.22,15.2,1.32,1.52,3.67,22.61,133.19,0.35,9.89,7.18,2.34,0.51,10.17,0.75,121.21,14.29,0.05,0.02,1.92,4.05,10.45,40.63,1.25,5.55,2.95,31.042,2.17,100.01
P3,chetoui,beja,sp,0.84,1.87,0.21,8.6,16.73,0.01,13.31,0.45,0.06,0.08,2.54,69.29,17.03,0.84,0.02,0.37,104,0.72,0.42,0.12,7.82,2.61,0.43,21.22,8.83,0.72,33.85,15.52,1.92,0.39,4.05,0.95,5.92,6.241,482.12,2.72,0.25,0.08,1.12,0.42,15.01,1.3,1.44,3.93,22.51,133.07,0.39,9.87,7.16,2.31,0.52,10.1,0.76,121.29,14.21,0.06,0.01,1.93,4.12,10.6,40.71,1.26,5.54,2.96,30.43,2.26,99.81
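One way to get the behaviour described, sketched here with hard-coded stand-ins for the factor columns: keep both selectInputs static in the UI and adjust the second one's choices with updateSelectInput whenever the first changes, so neither control is re-rendered and reset.
library(shiny)

choices_all <- c("Var", "Lo", "ES")  # stand-ins for the data's factor columns

ui <- fluidPage(
  selectInput("F11", "Factors to use: X1", choices = choices_all),
  selectInput("F12", "Factors to use: X2", choices = choices_all[-1])
)

server <- function(input, output, session) {
  observeEvent(input$F11, {
    keep <- setdiff(choices_all, input$F11)
    # keep the user's current X2 selection whenever it is still allowed
    sel <- if (isTRUE(input$F12 %in% keep)) input$F12 else keep[1]
    updateSelectInput(session, "F12", choices = keep, selected = sel)
  })
}

shinyApp(ui, server)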

Related

py.test capture unhandled exception

We are using py.test 2.8.7, and I have the method below, which creates a separate log file for every test case. However, this does not handle unhandled exceptions: if a code snippet throws an exception instead of failing an assert, the exception's stack trace is not logged into the separate file. Can someone please help me with how I could capture these exceptions?
import logging
import os

import pytest


def remove_special_chars(input):
    """
    Replaces all special characters which ideally should not be included in the name of a file.
    Such characters will be replaced with a dot so we know there was something useful there.
    """
    for special_ch in ["/", "\\", "<", ">", "|", "&", ":", "*", "?", "\"", "'"]:
        input = input.replace(special_ch, ".")
    return input


def assemble_test_fqn(node):
    """
    Assembles a fully-qualified name for our test-case which will be used as its test log file name.
    """
    current_node = node
    result = ""
    while current_node is not None:
        if current_node.name == "()":
            current_node = current_node.parent
            continue
        if result != "":
            result = "." + result
        result = current_node.name + result
        current_node = current_node.parent
    return remove_special_chars(result)


# This fixture creates a logger per test-case
@pytest.yield_fixture(scope="function", autouse=True)
def set_log_file_per_method(request):
    """
    Creates a separate file logging handler for each test method.
    """
    # Assembling the location of the log folder
    test_log_dir = "%s/all_test_logs" % (request.config.getoption("--output-dir"))
    # Creating the log folder if it does not exist
    if not os.path.exists(test_log_dir):
        os.makedirs(test_log_dir)
    # Adding a file handler
    test_log_file = "%s/%s.log" % (test_log_dir, assemble_test_fqn(request.node))
    file_handler = logging.FileHandler(filename=test_log_file, mode="w")
    file_handler.setLevel("INFO")
    log_format = request.config.getoption("--log-format")
    log_formatter = logging.Formatter(log_format)
    file_handler.setFormatter(log_formatter)
    logging.getLogger('').addHandler(file_handler)
    yield
    # After the test finished, we remove the file handler
    file_handler.close()
    logging.getLogger('').removeHandler(file_handler)
Since an unhandled exception is captured by pytest itself and attached to the test report (its longrepr) rather than passed through the logging module, the fixture above never sees it. I ended up with a custom plugin:
import io
import os

import pytest


def remove_special_chars(text):
    """
    Replaces all special characters which ideally should not be included in the name of a file.
    Such characters will be replaced with a dot so we know there was something useful there.
    """
    for special_ch in ["/", "\\", "<", ">", "|", "&", ":", "*", "?", "\"", "'"]:
        text = text.replace(special_ch, ".")
    return text


def assemble_test_fqn(node):
    """
    Assembles a fully-qualified name for our test-case which will be used as its test log file name.
    The result will also include the potential path of the log file as the parents are appended to the fqn with a /.
    """
    current_node = node
    result = ""
    while current_node is not None:
        if current_node.name == "()":
            current_node = current_node.parent
            continue
        if result != "":
            result = "/" + result
        result = remove_special_chars(current_node.name) + result
        current_node = current_node.parent
    return result


def as_unicode(text):
    """
    Encodes a text into unicode.
    If it's already unicode, we do not touch it.
    """
    if isinstance(text, unicode):
        return text
    else:
        return unicode(str(text))


class TestReport:
    """
    Holds a test-report.
    """

    def __init__(self, fqn):
        self._fqn = fqn
        self._errors = []
        self._sections = []

    def add_error(self, error):
        """
        Adds an error (either an Exception or an assertion error) to the list of errors.
        """
        self._errors.append(error)

    def add_sections(self, sections):
        """
        Adds captured sections to our internal list of sections.
        Since tests can have multiple phases (setup, call, teardown) this will be invoked for all phases.
        If for a newer phase we already captured a section, we override it in our already existing internal list.
        """
        interim = []
        for current_section in self._sections:
            section_to_add = current_section
            # If the current section we already have is also present in the input parameter,
            # we override our existing section with the one from the input as that's newer
            for index, input_section in enumerate(sections):
                if current_section[0] == input_section[0]:
                    section_to_add = input_section
                    sections.pop(index)
                    break
            interim.append(section_to_add)
        # Adding the new sections from the input parameter to our internal list
        for input_section in sections:
            interim.append(input_section)
        # And finally overriding our internal list of sections
        self._sections = interim

    def save_to_file(self, log_folder):
        """
        Saves the current report to a log file.
        """
        # Assembling the location of the log file
        test_log_file = "%s/%s.log" % (log_folder, self._fqn)
        # Creating the log folder if it does not exist
        if not os.path.exists(os.path.dirname(test_log_file)):
            os.makedirs(os.path.dirname(test_log_file))
        # Saving the report to the given log file
        with io.open(test_log_file, 'w', encoding='UTF-8') as f:
            for error in self._errors:
                f.write(as_unicode(error))
                f.write(u"\n\n")
            for index, section in enumerate(self._sections):
                f.write(as_unicode(section[0]))
                f.write(u":\n")
                f.write((u"=" * (len(section[0]) + 1)) + u"\n")
                f.write(as_unicode(section[1]))
                if index < len(self._sections) - 1:
                    f.write(u"\n")


class ReportGenerator:
    """
    A py.test plugin which collects the test-reports and saves them to a separate file per test.
    """

    def __init__(self, output_dir):
        self._reports = {}
        self._output_dir = output_dir

    @pytest.hookimpl(tryfirst=True, hookwrapper=True)
    def pytest_runtest_makereport(self, item, call):
        outcome = yield
        # Generating the fully-qualified name of the underlying test
        fqn = assemble_test_fqn(item)
        # Getting the already existing report for the given test from our internal dict
        # or creating a new one if it's not already present.
        # We need to do this as this method will be invoked for each phase (setup, call, teardown).
        if fqn not in self._reports:
            report = TestReport(fqn)
            self._reports.update({fqn: report})
        else:
            report = self._reports[fqn]
        result = outcome.result
        # Appending the sections for the current phase to the test-report
        report.add_sections(result.sections)
        # If we have an error, we add that as well to the test-report
        if hasattr(result, "longrepr") and result.longrepr is not None:
            error = result.longrepr
            error_text = ""
            if isinstance(error, str) or isinstance(error, unicode):
                error_text = as_unicode(error)
            elif isinstance(error, tuple):
                error_text = u"\n".join([as_unicode(e) for e in error])
            elif hasattr(error, "reprcrash") and hasattr(error, "reprtraceback"):
                if error.reprcrash is not None:
                    error_text += str(error.reprcrash)
                if error.reprtraceback is not None:
                    if error_text != "":
                        error_text += "\n\n"
                    error_text += str(error.reprtraceback)
            else:
                error_text = as_unicode(error)
            report.add_error(error_text)
        # Finally saving the report.
        # We need to do this for all phases as we don't know if and when a test would fail.
        # This will essentially override the previous log file for a test if we are in a newer phase.
        report.save_to_file("%s/all_test_logs" % self._output_dir)


def pytest_configure(config):
    config._report_generator = ReportGenerator("result")
    config.pluginmanager.register(config._report_generator)
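If the plugin code above lives in a conftest.py, the module-level pytest_configure registers it automatically. A quick way to exercise it (file name hypothetical):

# test_demo.py - a deliberately failing test: an unhandled exception, not a failed assert
def test_raises():
    raise RuntimeError("boom")

After running py.test, a per-test log file should appear under result/all_test_logs/ containing the RuntimeError traceback together with the captured sections.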

Is there a way to convert juniper "json" or "xml" config to "set" or "show" config?

We use Juniper hardware with Junos version 15. In this version we can export our config as "json" or "xml", which we want to edit with our automation tooling.
Importing, however, is only possible in "set" or "show" format.
Is there a tool to convert the "json" or "xml" format to "set" or "show" format?
I can only find converters between "show" and "set".
We can't upgrade to version 16, where importing "json" would be possible.
Here's a script I made at work; throw it in your bin and you can run it by providing a filename or piping output into it. It assumes Linux or macOS so the os.isatty check works, but the logic can work anywhere:
usage demo:
person#laptop ~ > head router.cfg
## Last commit: 2021-04-20 21:21:39 UTC by vit
version 15.1X12.2;
groups {
BACKBONE-PORT {
interfaces {
<*> {
mtu 9216;
unit <*> {
family inet {
mtu 9150;
person#laptop ~ > convert.py router.cfg | head
set groups BACKBONE-PORT interfaces <*> mtu 9216
set groups BACKBONE-PORT interfaces <*> unit <*> family inet mtu 9150
set groups BACKBONE-PORT interfaces <*> unit <*> family inet6 mtu 9150
set groups BACKBONE-PORT interfaces <*> unit <*> family mpls maximum-labels 5
<... output removed... >
convert.py:
#!/usr/bin/env python3
# Script that attempts to parse out Juniper curly-brace config into set format
# I think it works? still testing
#
# TODO:
# accumulate annotations and provide them as commands at the end. Will be weird
# as annotations have to be done after an edit command
from argparse import ArgumentParser, RawTextHelpFormatter
import sys, os, re


class TokenStack():
    def __init__(self):
        self._tokens = []

    def push(self, token):
        self._tokens.append(token)

    def pop(self):
        if not self._tokens:
            return None
        item = self._tokens[-1]
        self._tokens = self._tokens[:-1]
        return item

    def peek(self):
        if not self._tokens:
            return None
        return self._tokens[-1]

    def __str__(self):
        return " ".join(self._tokens)

    def __repr__(self):
        return " ".join(self._tokens)


def main():
    # get file
    a = ArgumentParser(prog="convert_jpr_json",
                       description="This program takes in Juniper style JSON (blah { format) and prints it in a copy pastable display set format",
                       epilog=f"Either supply with a filename or pipe config contents into this program and it'll print out the display set view.\nEx:\n{B}convert_jpr_json <FILENAME>\ncat <FILENAME> | convert_jpr_json{WHITE}",
                       formatter_class=RawTextHelpFormatter)
    a.add_argument('file', help="juniper config in JSON format", nargs="?")
    args = a.parse_args()
    if not args.file and os.isatty(0):
        a.print_help()
        die("Please supply filename or provide piped input")
    file_contents = None
    if args.file:
        try:
            file_contents = open(args.file, "r").readlines()
        except IOError as e:
            die(f"Issue opening file {args.file}: {e}")
    else:
        file_contents = sys.stdin.readlines()
    tokens = TokenStack()
    in_comment = False
    new_config = []
    for line_num, line in enumerate(file_contents):
        if line.startswith("version ") or not line.strip():
            continue
        token = re.sub(r"^(.+?)#+[^\"]*$", r"\1", line.strip())
        token = token.strip()
        if any(token.startswith(_) for _ in ["!", "#"]):
            # annotations currently not supported
            continue
        if token.startswith("/*"):
            # we're in a comment now until the next token (this will break if a multiline
            # comment with # style { happens, but hopefully no-one is that dumb)
            in_comment = True
            continue
        if "inactive: " in token:
            token = token.split("inactive: ")[1]
            new_config.append(f"deactivate {tokens} {token}")
        if token[-1] == "{":
            in_comment = False
            tokens.push(token.strip("{ "))
        elif token[-1] == "}":
            if not tokens.pop():
                die("Invalid json supplied: unmatched closing } encountered on line " + f"{line_num}")
        elif token[-1] == ";":
            new_config.append(f"set {tokens} {token[:-1]}")
    if tokens.peek():
        print(tokens)
        die("Unbalanced JSON: expected closing }, but encountered EOF")
    print("\n".join(new_config))


def die(msg): print(f"\n{B}{RED}FATAL ERROR{WHITE}: {msg}"); exit(1)


RED = "\033[31m"; GREEN = "\033[32m"; YELLOW = "\033[33m"; B = "\033[1m"; WHITE = "\033[0m"

if __name__ == "__main__": main()
You can load XML configuration using edit-config RPC or load-configuration RPC. For more details:
https://www.juniper.net/documentation/en_US/junos/topics/reference/tag-summary/netconf-edit-config.html
https://www.juniper.net/documentation/en_US/junos/topics/reference/tag-summary/junos-xml-protocol-load-configuration.html
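If the device is reachable over NETCONF, the same load can be scripted. A minimal sketch using the junos-eznc (PyEZ) library; hostname, credentials, and the file name are placeholders:

from jnpr.junos import Device
from jnpr.junos.utils.config import Config

# Connect over NETCONF and load an XML configuration file
with Device(host="router1", user="admin", password="secret") as dev:
    cu = Config(dev)
    cu.load(path="new-config.xml", format="xml", merge=True)
    cu.commit(comment="loaded XML config via PyEZ")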
XML content can be loaded via an "op" script by placing the content inside a call to the jcs:load-configuration() template defined in "junos.xsl". Something like the following:
version 1.1;

ns jcs = "http://xml.juniper.net/junos/commit-scripts/1.0";
import "../import/junos.xsl";

var $arguments = {
    <argument> {
        <name> "file";
        <description> "Filename of XML content to load";
    }
    <argument> {
        <name> "action";
        <description> "Mode for the load (override, replace, merge)";
    }
}

param $file;
param $action = "replace";

match / {
    <op-script-results> {
        var $configuration = slax:document($file);
        var $connection = jcs:open();
        call jcs:load-configuration($connection, $configuration, $action);
    }
}
Thanks,
Phil
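Assuming the script is saved on the device as load-config.slax under /var/db/scripts/op/ and enabled with "set system scripts op file load-config.slax" (the filename is illustrative), invoking it from the CLI should look something like:

op load-config file /var/tmp/new-config.xml action merge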

ORA-06550: PLS-00103: Encountered the symbol "" with mybatis TypeHandler

I am using a TypeHandler to map a List<Dep> to an Oracle array of objects. Here is the setParameter method in the handler:
public void setParameter(PreparedStatement ps, int i, List<Dep> parameter, JdbcType jdbcType)
        throws SQLException {
    Connection connection = ps.getConnection();
    // StructDescriptor structDescriptor = StructDescriptor.createDescriptor("MEMS_ARR", connection);
    Struct[] structs = null;
    if (parameter != null && parameter.size() > 0) {
        structs = new Struct[parameter.size()];
        for (int index = 0; index < parameter.size(); index++) {
            Dep dep = parameter.get(index);
            Object[] params = new Object[7];
            params[0] = dep.getOrder();
            params[1] = dep.getIdTp();
            params[2] = dep.getId();
            params[3] = " ";
            params[4] = " ";
            params[5] = " ";
            params[6] = " ";
            // STRUCT struct = new STRUCT(structDescriptor, ps.getConnection(), params);
            structs[index] = connection.createStruct("MEMS", params);
        }
        // ArrayDescriptor desc = ArrayDescriptor.createDescriptor("MEMS_ARR", ps.getConnection());
        // ARRAY oracleArray = new ARRAY(desc, ps.getConnection(), structs);
    } else {
        parameter = new ArrayList<Dep>();
        structs = new Struct[0];
    }
    this.parameter = parameter;
    Array oracleArray = ((OracleConnection) connection).createOracleArray("MEMS_ARR", structs);
    ps.setArray(i, oracleArray);
}
and here is the MEMS type :
create or replace TYPE MEMS AS OBJECT
( MEM1 NUMBER(2,0),
MEM2 VARCHAR2(1),
MEM3 VARCHAR2(15),
MEM4 VARCHAR2(60),
MEM5 VARCHAR2(1),
MEM6 VARCHAR2(40),
MEM7 VARCHAR2(10)
);
and here is the portion of the XML mapping file that uses the TypeHandler:
#{nat,javaType=String,jdbcType=VARCHAR,mode=IN}, --nat
#{deps,javaType=List,jdbcType=ARRAY,mode=IN,jdbcTypeName=MEMS_ARR,typeHandler=com.my.package.MyHandler}, --mems
#{res,javaType=String,jdbcType=VARCHAR,mode=OUT} --res
the error log is as follows :
Error querying database. Cause: java.sql.SQLException: ORA-06550: line 31, column 5: PLS-00103: Encountered the symbol "" when expecting one of the following: . ( ) , * # % & = - + < / > at in is mod remainder not rem => <an exponent (**)> <> or != or ~= >= <= <> and or like like2 like4 likec between || indicator multiset member submultiset The symbol "(" was substituted for "" to continue. ORA-06550: line 44, column 4: PLS-00103: Encountered the symbol ";" when expecting one of the following: . ( ) , * % & = - + < / > at in is mod remainder not rem => <an exponent (**)> <> or != or ~= >= <= <> and or like like2 like4 likec between || multiset ### The error may exist in file [E:\path\to\mapper\ADao.xml] ### The error may involve my.package.ADao.mthodToCall -Inline ### The error occurred while setting parameters ### SQL: {call MY_PROC( ... , --nat?, --mems? --res)}
As you can see in the logs, the mems parameter is replaced by an empty string or merged with the next argument res: the comma between them is not there.
Also kindly note that I already debugged inside the mybatis code and saw that the handler's setParameter method is called and the input List is mapped correctly to the Oracle array; the issue happens at the time of the actual call.
The issue actually was that I had simply missed one comma between two earlier parameters; the error just pointed at the wrong parameter to look at.

Generate an Odoo downloadable CSV report

I need to provide a button in the Sale Order form view to export order lines to CSV with a specific format. I have searched a lot, but I have only found custom modules that don't satisfy the requirement, because users shouldn't have to select fields.
UPDATE: Solution
I ended up doing the following solution, thanks to @phillip-stack and his answer:
Model
# -*- coding: utf-8 -*-
from openerp import api, fields, models, _


class sale_order_export_line(models.Model):
    _inherit = 'sale.order'
    _auto = False

    @api.multi
    def export_lines_to_csv(self):
        return {
            'type': 'ir.actions.act_url',
            'url': '/csv/download/sale_order/%s/supplier_name/%s' % (self.id, 'American'),
            'target': 'blank',
        }

    @api.model
    def _csv_download(self, vals):
        order_id = vals.get('order_id')
        supplier_name = vals.get('supplier_name')
        so = self.env['sale.order'].browse(order_id)
        lines = so.order_line.search([('order_id', '=', order_id),
                                      ('supplier_name', 'ilike', supplier_name)])
        columns = [u'Número pedido Dentaltix', u'Nombre de cliente', u'Dirección',
                   u'Código postal', u'Población', u'Provincia', u'País', u'Teléfono',
                   u'Horario de entrega', u'Referencia', u'Cantidad', u'Envío']
        csv = u','.join(columns)
        csv += "\n"
        if len(lines) > 0:
            for ol in lines:
                drupal_order_name = so.drupal_order_name if so.drupal_order_name else ''
                client_notes = so.client_notes if so.client_notes else ''
                supplier_ref = ol.supplier_ref if ol.supplier_ref else ''
                # DELIVERY_METHODS is a dict defined elsewhere in the module
                picking_policy = DELIVERY_METHODS[so.picking_policy] if so.picking_policy else 'Directo'
                product_uos_qty = str(int(ol.product_uos_qty)) if ol.product_uos_qty else '0'
                # The original post omits how `data` is assembled; a plausible
                # reconstruction from the fields above and the partner record:
                data = [drupal_order_name, so.partner_id.name or '',
                        so.partner_id.street or '', so.partner_id.zip or '',
                        so.partner_id.city or '', so.partner_id.state_id.name or '',
                        so.partner_id.country_id.name or '', so.partner_id.phone or '',
                        client_notes, supplier_ref, product_uos_qty, picking_policy]
                csv_row = u'","'.join(data)
                csv += u"\"{}\"\n".format(csv_row)
        return csv
Controller
# -*- coding: utf-8 -*-
from openerp import http
from openerp.http import request
from openerp.addons.web.controllers.main import serialize_exception, content_disposition


class SaleOrderController(http.Controller):

    @http.route('/csv/download/sale_order/<int:order_id>/supplier_name/<string:supplier_name>', auth='user')
    def sale_order_lines_csv_download(self, order_id, supplier_name, **kw):
        if supplier_name:
            csv = http.request.env['sale.order']._csv_download(
                {'order_id': order_id, 'supplier_name': supplier_name})
        else:
            csv = http.request.env['sale.order']._csv_download(
                {'order_id': order_id, 'supplier_name': False})
        filename = 'order_lines_%s_%s.csv' % (order_id, supplier_name)
        return request.make_response(
            csv,
            [('Content-Type', 'application/octet-stream'),
             ('Content-Disposition', 'attachment; filename="%s"' % filename)])
I probably should not be admitting to this, but I use a technique with controllers and adding a link to a form. The controller can be modified to run custom security checks of your choosing, then you can use sudo() to bypass field restrictions on the models in question and just return the CSV in the format of your choosing.
And of course... An Example!
CONTROLLER
@http.route('/csv/download/<int:rec_id>/', auth='user', website=True)
def csvdownload(self, rec_id, **kw):
    return http.request.env['your_addon.your_model']._csv_download({'rec_id': rec_id})
MODEL METHOD
def _get_csv_url(self):
    self.csv_url = "/csv/download/{}/".format(self.id)

csv_url = fields.Char(compute=_get_csv_url)

@api.model
def _csv_download(self, vals):
    sql = """SELECT
        quote_nullable(field_1),
        quote_nullable(field_2),
        quote_nullable(field_3),
        quote_nullable(field_4)
    FROM
        table_name
    WHERE id={}""".format(vals.get('rec_id'))
    self.env.cr.execute(sql)
    rows = self.env.cr.fetchall()
    csv = """'Field 1','Field 2','Field 3','Field 4'\n"""
    if rows:
        for row in rows:
            csv_row = ""
            for item in row:
                csv_row += "{},".format(item)
            csv += "{}\n".format(csv_row[:-1])
    return csv
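A side note on the hand-rolled quoting above: it breaks as soon as a field contains a comma or a quote. A small sketch (Python 3 semantics shown; the openerp-era code above runs on Python 2, which needs byte handling instead) that delegates quoting to the standard csv module:

import csv
import io

def rows_to_csv_text(header, rows):
    # Let csv.writer take care of quoting and escaping
    buf = io.StringIO()
    writer = csv.writer(buf, quoting=csv.QUOTE_ALL)
    writer.writerow(header)
    for row in rows:
        writer.writerow(["" if v is None else v for v in row])
    return buf.getvalue()

# usage sketch
print(rows_to_csv_text(["Field 1", "Field 2"], [(1, 'a "quoted", value')]))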
In your form, have a link which points to your controller:
<a id="csv_download" href="#" target="_blank" download="file.csv"/>
<div id="csv_url_div" style="display:none"><field name="csv_url"/></div>
<script>
    $(document).ready(function(){
        var csv_url = $("#csv_url_div").text();
        $("#csv_download").attr("href", csv_url);
    });
</script>
I acknowledge the level of hackiness that is going on here. I am sure that if I spent more time on it, I could do something with a nice Odoo widget. But it has worked for me.

leafletR map doesn't load in shiny on start

I have the following little piece of code (more or less as described HERE). I want to control the number of points shown with a slider in Shiny. You can see that the initial map is created after a little while (watch the console output), but it only shows up after you have used the slider once.
I'd like the map to show up right after it is created during launch of the Shiny app - any hints on how to do that?
## app.R ##
library(shiny)
library(shinydashboard)
library(httr)
library(leafletR)

data(quakes)
# dest_dir=tempdir()
dest_dir <- "foo_map"
dest_file <- paste(dest_dir, "quakes", "quakes.html", sep = "\\")
dat <- quakes

createMapHTML <- function(inputFreq = 1) {
  q.dat <- toGeoJSON(data = dat[seq(from = 1, to = nrow(dat), by = inputFreq), ],
                     dest = dest_dir, name = "quakes")
  sty <- styleSingle(col = "darkblue", fill = "darkblue", rad = 6)
  # create map
  q.map <- leaflet(data = q.dat, dest = dest_dir, size = c(1200, 600), incl.data = TRUE,
                   base.map = list("osm"), style = sty, popup = "*", controls = "all")
}
# createMapHTML()

runApp(list(
  ui = dashboardPage(
    dashboardHeader(title = "quakes"),
    dashboardSidebar(
      sliderInput("slider", "#observations frequency:", 1, 100, 1)
    ),
    dashboardBody(
      htmlOutput("inc")
    )
  ),
  server = function(input, output, session) {
    createMap <- reactive({
      createMapHTML(input$slider)
      return(includeHTML(dest_file))
    })
    output$inc <- renderUI({ createMap() })
  }
))
So, the bottleneck with the leafletR package is the conversion to GeoJSON. Additionally, the "includeHTML & htmlOutput" workaround for embedding the HTML output is flaky.
To avoid both I just switched to the leaflet package:
## app.R ##
library(shiny)
library(shinydashboard)
library(leaflet)

data(quakes)
dat <- quakes

runApp(list(
  ui = dashboardPage(
    dashboardHeader(title = "quakes"),
    dashboardSidebar(
      sliderInput("slider", "#observations frequency:", 1, 100, 1)
    ),
    dashboardBody(
      leafletOutput("map", height = 600)
    )
  ),
  server = function(input, output) {
    output$map <- renderLeaflet({
      map <- leaflet() %>% addTiles()
      map %>% addCircles(data = dat[seq(from = 1, to = nrow(dat), by = input$slider), ],
                         lat = ~lat, lng = ~long, fillOpacity = 1.0)
    })
  }
))
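A possible further refinement, sketched here untested: render the base map once at startup and redraw only the circles when the slider moves, via leafletProxy, instead of rebuilding the whole map in renderLeaflet. The server function would become something like:

server = function(input, output) {
  output$map <- renderLeaflet({
    leaflet() %>% addTiles()  # base map, built once at startup
  })
  observe({
    pts <- dat[seq(from = 1, to = nrow(dat), by = input$slider), ]
    leafletProxy("map") %>%
      clearShapes() %>%
      addCircles(data = pts, lat = ~lat, lng = ~long, fillOpacity = 1.0)
  })
}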