I'm TDDing an RxJS solution using bufferWithTime. Why does res in the example code end up as [] (an empty array)? Is this a problem in my code or in the RxJS library? I'm running it on Node.js v0.10.30 with rx version 2.2.27.
The following can be run in the Node.js CoffeeScript REPL:
Rx = require 'rx'
onNext = Rx.ReactiveTest.onNext
TEST_EVENT_A = { messageName: 'test_event_a', namespace: 'test' }
events = [onNext(50, TEST_EVENT_A), onNext(100, TEST_EVENT_A)]
scheduler = new Rx.TestScheduler
stream = scheduler.createHotObservable events
excludeEmpty = (event) -> console.log "Filtering...", event; event.length > 0
countValues = (event) -> console.log "Counting...", event; event.length
res = scheduler.startWithTiming((=> stream.bufferWithTime(10).filter(excludeEmpty).map(countValues)), 0, 0, 1000).messages
# => []
After debugging and reading the docs, I noticed the following sentence in the bufferWithTime documentation: [scheduler=Rx.Scheduler.timeout] (Scheduler): Scheduler to run buffer timers on. If not specified, the timeout scheduler is used.
I had thought that an observable created from a TestScheduler would use that scheduler for all of its methods, but the scheduler has to be passed in explicitly. So below is the correct solution:
print = (event) -> console.log "Event: ", event
Rx = require 'rx'
onNext = Rx.ReactiveTest.onNext
TEST_EVENT_A = { messageName: 'test_event_a', namespace: 'test' }
events = [onNext(50, TEST_EVENT_A), onNext(100, TEST_EVENT_A)]
scheduler = new Rx.TestScheduler
stream = scheduler.createHotObservable events
excludeEmpty = (event) -> console.log "Filtering...", event; event.length > 0
countValues = (event) -> console.log "Counting...", event; event.length
# Notice the 2nd argument to bufferWithTime
res = scheduler.startWithTiming((=> stream.bufferWithTime(100, scheduler).filter(excludeEmpty).map(countValues)), 0, 0, 1500).messages
# => res[0].value.value == 2
In my Airflow DAG, I have an ECSOperator task followed by a PythonOperator task. I want to push some messages from the ECS task to the Python task using Airflow's XCom feature. I tried the option do_xcom_push=True with no result. Find below a sample DAG.
dag = DAG(dag_name, default_args=default_args, schedule_interval=None)

start = DummyOperator(task_id='start', dag=dag)
end = DummyOperator(task_id='end', dag=dag)

ecs_operator_args = {
    'launch_type': 'FARGATE',
    'task_definition': 'task-def:2',
    'cluster': 'cluster-name',
    'region_name': 'region',
    'network_configuration': {
        'awsvpcConfiguration': {}
    }
}

ecs_task = ECSOperator(
    task_id='x_com_test',
    **ecs_operator_args,
    do_xcom_push=True,
    params={'my_param': 'Parameter-1'},
    dag=dag)

def pull_function(**kwargs):
    ti = kwargs['ti']
    msg = ti.xcom_pull(task_ids='x_com_test', key='the_message')
    print("received message: '%s'" % msg)

pull_task = PythonOperator(
    task_id='pull_task',
    python_callable=pull_function,
    provide_context=True,
    dag=dag)

start >> ecs_task >> pull_task >> end
You need to set up a CloudWatch log group for the container.
ECSOperator needs to be extended to support pushing to XCom:
from collections import deque

from airflow.utils import apply_defaults
from airflow.contrib.operators.ecs_operator import ECSOperator


class MyECSOperator(ECSOperator):
    @apply_defaults
    def __init__(self, xcom_push=False, **kwargs):
        super(MyECSOperator, self).__init__(**kwargs)
        self.xcom_push_flag = xcom_push

    def execute(self, context):
        super(MyECSOperator, self).execute(context)
        if self.xcom_push_flag:
            return self._last_log_event()

    def _last_log_event(self):
        # Return the message of the most recent CloudWatch log event
        # for the finished task.
        if self.awslogs_group and self.awslogs_stream_prefix:
            task_id = self.arn.split("/")[-1]
            stream_name = "{}/{}".format(self.awslogs_stream_prefix, task_id)
            events = self.get_logs_hook().get_log_events(self.awslogs_group, stream_name)
            last_event = deque(events, maxlen=1).pop()
            return last_event["message"]
dag = DAG(dag_name, default_args=default_args, schedule_interval=None)

start = DummyOperator(task_id='start', dag=dag)
end = DummyOperator(task_id='end', dag=dag)

ecs_operator_args = {
    'launch_type': 'FARGATE',
    'task_definition': 'task-def:2',
    'cluster': 'cluster-name',
    'region_name': 'region',
    'awslogs_group': '/aws/ecs/myLogGroup',
    'awslogs_stream_prefix': 'myStreamPrefix',
    'network_configuration': {
        'awsvpcConfiguration': {}
    }
}

ecs_task = MyECSOperator(
    task_id='x_com_test',
    **ecs_operator_args,
    xcom_push=True,
    params={'my_param': 'Parameter-1'},
    dag=dag)

def pull_function(**kwargs):
    ti = kwargs['ti']
    msg = ti.xcom_pull(task_ids='x_com_test', key='return_value')
    print("received message: '%s'" % msg)

pull_task = PythonOperator(
    task_id='pull_task',
    python_callable=pull_function,
    provide_context=True,
    dag=dag)

start >> ecs_task >> pull_task >> end
ecs_task will take the last event from the log group before it finishes executing and push it to XCom.
Apache-AWS has a new commit that pretty much implements what @Бојан-Аџиевски mentioned above, so you don't need to write a custom ECSOperator. Available as of version 1.1.0.
All you have to do is provide do_xcom_push=True when calling the ECSOperator, along with the correct awslogs_group and awslogs_stream_prefix.
Make sure your awslogs_stream_prefix follows this format:
prefix-name/container-name
as this is where ECS directs the logs.
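For illustration, a minimal sketch of the built-in variant (the log group, stream prefix, and container name below are placeholders, and the exact XCom behavior assumes a provider version that returns the last log line as the task's return_value):

ecs_task = ECSOperator(
    task_id='x_com_test',
    launch_type='FARGATE',
    task_definition='task-def:2',
    cluster='cluster-name',
    region_name='region',
    awslogs_group='/aws/ecs/myLogGroup',  # must exist in CloudWatch
    awslogs_stream_prefix='myStreamPrefix/my-container',  # prefix-name/container-name
    do_xcom_push=True,  # push the last log event as the task's return_value
    dag=dag)

The downstream task can then read it with ti.xcom_pull(task_ids='x_com_test', key='return_value'), exactly as in pull_function above.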
I'm writing a custom ejabberd module. When I send a custom IQ stanza using Strophe.js, ejabberd processes the request and returns the result IQ back to the sender.
Below is the IQ request I send using Strophe.js:
connection.sendIQ($iq({
    to: 'john@localhost',
    type: 'set',
    id: 'abc1234567890'
})
    .c('query', {
        xmlns: 'jabber:iq:custom_module',
        msg_id: 'xyz9876543210'
    })
    .tree());
and this is the ejabberd module code:
-module(mod_custom_module).
-behaviour(gen_mod).
-define(NS_CUSTOM_MODULE, <<"jabber:iq:custom_module">>).
-export([start/2, stop/1, depends/2, mod_options/1, process_sm_iq/1, decode_iq_subel/1]).
-include("xmpp.hrl").
-include("logger.hrl").
-include("ejabberd_sql_pt.hrl").
start(_Host, _Opts) ->
gen_iq_handler:add_iq_handler(ejabberd_sm, _Host, ?NS_CUSTOM_MODULE, ?MODULE, process_sm_iq, one_queue).
stop(_Host) ->
gen_iq_handler:remove_iq_handler(ejabberd_sm, _Host, ?NS_CUSTOM_MODULE).
depends(_Host, _Opts) ->
[].
mod_options(_Host) ->
[].
-spec decode_iq_subel(xmpp_element()) -> xmpp_element();
(xmlel()) -> xmlel().
decode_iq_subel(El) ->
El.
-spec process_sm_iq(iq()) -> iq().
process_sm_iq(#iq{from = _From, to = _To, sub_els = _sub_els} = IQ) ->
% My module actions here...
[First | Rest] = _sub_els,
xmpp:make_iq_result(IQ, First).
After processing the IQ, I also want to notify the other user, john@localhost, about the custom event. I tried to do this using ejabberd_router:route/3, but it did not work.
I don't know what I am doing wrong.
Update
When I use the following code, the other user does not receive the stanza.
NewIQ = #iq{id = _Id, type = result, to = _To, from = _From, sub_els = _sub_els},
ejabberd_router:route(xmpp:set_from_to(NewIQ, _From, _To)),
% or ejabberd_router:route(NewIQ),
% or ejabberd_sm:route(NewIQ),
When I checked the debug console, it showed the following message. I'm not sure whether this is relevant, as it is just a debug-level message and there is no other failure or error message.
17:07:47.173 [debug] Dropping packet to unavailable resource:
#iq{id = <<"abc1234567890">>,type = result,lang = <<>>,
from = #jid{user = <<"nikhil">>,server = <<"localhost">>,
resource = <<"49230572059507447681762">>,luser = <<"nikhil">>,
lserver = <<"localhost">>,
lresource = <<"49230572059507447681762">>},
to = #jid{user = <<"john">>,server = <<"localhost">>,
resource = <<>>,luser = <<"john">>,
lserver = <<"localhost">>,lresource = <<>>},
sub_els = [#xmlel{name = <<"query">>,
attrs = [{<<"xmlns">>,<<"jabber:iq:custom_module">>},
{<<"msg_id">>,<<"xyz9876543210">>}],
children = []}],
meta = #{}}
The debug line "Dropping packet to unavailable resource" explains what happened: the IQ result was addressed to a JID with no available resource, so ejabberd discarded it. Routing a message stanza instead avoids this. Try this function: it sends a headline message to the destination account with some details about the original IQ.
process_sm_iq(#iq{from = From, to = To, sub_els = SubEls} = IQ) ->
    [First | _Rest] = SubEls,
    MsgId = fxml:get_tag_attr_s(<<"msg_id">>, First),
    Subject = <<"Event alert">>,
    %% MsgId is a binary, so build the body as a binary rather than
    %% appending it to a string with ++.
    Body = <<"An IQ was received by custom_module with msg_id: ", MsgId/binary>>,
    Packet = #message{from = From,
                      to = To,
                      type = headline,
                      body = xmpp:mk_text(Body),
                      subject = xmpp:mk_text(Subject)},
    ejabberd_router:route(Packet),
    xmpp:make_iq_result(IQ, First).
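If routing succeeds, the destination client should receive a stanza roughly like the following (shown for illustration based on the JIDs in your debug output; the exact attributes and their order will differ):

<message from='nikhil@localhost/49230572059507447681762'
         to='john@localhost'
         type='headline'>
  <subject>Event alert</subject>
  <body>An IQ was received by custom_module with msg_id: xyz9876543210</body>
</message>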
I want to write a task that is only executable from within a given queue: if somebody tries to pass a different queue into the routing_key parameter of apply_async, I want to raise an exception. How do I do this?
You could write your own Task class that checks that a valid routing key is being passed in when apply_async is called. You can also apply this to queues. Set up routes and queues in your config:
import celery
from kombu import Queue, Exchange
app = celery.Celery('app')
app.conf.CELERY_QUEUES = (
Queue('add', Exchange('default'), routing_key='good'),
)
app.conf.CELERY_ROUTES = {
'app.add': {
'queue': 'add',
'routing_key': 'good'
}
}
Now, create your own Task class that will perform the check on the routing key. You'll need to override apply_async:
from kombu.utils import uuid  # needed for the eager branch below

class RouteCheckerTask(celery.Task):
    abstract = True

    def apply_async(self, args=None, kwargs=None, task_id=None, producer=None,
                    link=None, link_error=None, **options):
        app = self._get_app()
        routing_key = options.get('routing_key', None)
        if routing_key:
            valid_routes = [v['routing_key'] for v in app.conf.CELERY_ROUTES.values()]
            if routing_key not in valid_routes:
                raise NotImplementedError('{} is not a valid routing key. Options are: {}'.format(routing_key, valid_routes))
        if app.conf.CELERY_ALWAYS_EAGER:
            return self.apply(args, kwargs, task_id=task_id or uuid(),
                              link=link, link_error=link_error, **options)
        # Add 'self' if this is a "task_method".
        if self.__self__ is not None:
            args = args if isinstance(args, tuple) else tuple(args or ())
            args = (self.__self__,) + args
        return app.send_task(
            self.name, args, kwargs, task_id=task_id, producer=producer,
            link=link, link_error=link_error, result_cls=self.AsyncResult,
            **dict(self._get_exec_options(), **options)
        )
Base your tasks on this one and call apply_async normally:
@app.task(base=RouteCheckerTask)
def add(x, y):
    return x + y
# Fails
add.apply_async([1, 2], routing_key='bad')
# Passes
add.apply_async([1, 2], routing_key='good')
I am losing messages in my Tornado chat, and I do not know how to detect when a message wasn't sent so that I can send it again.
Is there any way to detect when the connection gets lost, and to resend the message when the connection is restored?
This is my code:
def get(self):
    try:
        json.dumps(MessageMixin.cache)
    except KeyError:
        raise tornado.web.HTTPError(404)
class MessageMixin(object):
    waiters = {}
    cache = {}
    cache_size = 200

    def wait_for_messages(self, cursor=None):
        t = self.section_slug
        waiters = self.waiters.setdefault(t, [])
        result_future = Future()
        waiters.append(result_future)
        return result_future

    def cancel_wait(self, future):
        t = self.section_slug
        waiters = self.waiters.setdefault(t, [])
        waiters.remove(future)
        # Set an empty result to unblock any coroutines waiting.
        future.set_result([])

    def new_messages(self, message):
        t = self.section_slug
        #cache = self.cache.setdefault(t, [])
        #print t
        #print self.waiters.setdefault(t, [])
        waiters = self.waiters.setdefault(t, [])
        for future in waiters:
            try:
                if message is not None:
                    future.set_result(message)
            except Exception:
                logging.error("Error in waiter callback", exc_info=True)
        waiters = []
        #self.cache.extend(message)
        #if len(self.cache) > self.cache_size:
        #    self.cache = self.cache[-self.cache_size:]
class MessageNewHandler(MainHandler, MessageMixin):
    def post(self, section_slug):
        self.section_slug = section_slug
        post = self.get_argument("html")
        idThread = self.get_argument("idThread")
        isOpPost = self.get_argument("isOpPost")
        arg_not = self.get_argument("arg")
        type_not = self.get_argument("type")
        redirect_to = self.get_argument("next", None)
        message = {"posts": [post], "idThread": idThread, "isOpPost": isOpPost,
                   "type": type_not, "arg_not": arg_not}
        if redirect_to:
            self.redirect(redirect_to)
        else:
            self.write(post)
        self.new_messages(message)
class MessageUpdatesHandler(MainHandler, MessageMixin):
    @gen.coroutine
    def post(self, section_slug):
        self.section_slug = section_slug
        try:
            self.future = self.wait_for_messages(cursor=self.get_argument("cursor", None))
            data = yield self.future
            if self.request.connection.stream.closed():
                return
            self.write(data)
        except Exception:
            raise tornado.web.HTTPError(404)

    def on_connection_close(self):
        self.cancel_wait(self.future)
class Application(tornado.web.Application):
    def __init__(self):
        handlers = [
            (r"/api/1\.0/stream/(\w+)", MessageUpdatesHandler),
            (r"/api/1\.0/streamp/(\w+)", MessageNewHandler)
        ]
        tornado.web.Application.__init__(self, handlers)

def main():
    tornado.options.parse_command_line()
    app = Application()
    port = int(os.environ.get("PORT", 5000))
    app.listen(port)
    tornado.ioloop.IOLoop.instance().start()

if __name__ == "__main__":
    main()
In the original chatdemo, this is what the cursor parameter to wait_for_messages is for: the browser tells you the last message it got, so you can send it every message since then. You need to buffer messages and potentially re-send them in wait_for_messages. The code you've quoted here only sends messages to the clients that are connected at the moment the message comes in. Remember that in long-polling, sending a message puts the client out of the "waiting" state for the duration of the network round-trip, so even when things are working normally, clients will constantly enter and leave the waiting state.
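As a minimal sketch of that buffering (it reuses Future and logging from your code; assigning each message dict an "id" when it is cached, and resolving waiters with a {"messages": [...]} dict, are assumptions for illustration):

class MessageMixin(object):
    waiters = {}
    cache = {}
    cache_size = 200

    def wait_for_messages(self, cursor=None):
        t = self.section_slug
        cache = self.cache.setdefault(t, [])
        result_future = Future()
        if cursor:
            # Replay everything the client missed since its last message.
            ids = [m["id"] for m in cache]
            if cursor in ids:
                missed = cache[ids.index(cursor) + 1:]
                if missed:
                    result_future.set_result({"messages": missed})
                    return result_future
        self.waiters.setdefault(t, []).append(result_future)
        return result_future

    def new_messages(self, message):
        t = self.section_slug
        cache = self.cache.setdefault(t, [])
        cache.append(message)
        # Keep only the most recent cache_size messages per section.
        if len(cache) > self.cache_size:
            del cache[:-self.cache_size]
        for future in self.waiters.setdefault(t, []):
            try:
                future.set_result({"messages": [message]})
            except Exception:
                logging.error("Error in waiter callback", exc_info=True)
        self.waiters[t] = []

The client then passes the id of the last message it received as the cursor argument on its next poll, so a message that arrives while the client is between polls is picked up from the cache instead of being lost.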
How can I close a WebSocket connection? The example in the documentation works if you want to close the connection immediately.
But what about the following case: I want to close the connection when some condition occurs in the future, for instance when I receive a certain message from the client.
def indexWS = WebSocket.using[String] {
request => {
var channel: Option[Concurrent.Channel[String]] = None
var outEnumerator: Enumerator[String] = Concurrent.unicast(c => channel = Some(c))
val myIteratee: Iteratee[String, Unit] = Iteratee.foreach[String] {gotString => {
// received a string from the client
if (gotString == "close_me") {
// outEnumerator = Enumerator.eof // doesn't work
// outEnumerator >>> Enumerator.eof // doesn't work
}
}}
(myIteratee, outEnumerator)
}
}
Thank you for your help!
I got it: I had to go through the channel that I opened at
var outEnumerator: Enumerator[String] = Concurrent.unicast(c => channel = Some(c))
and the commented-out block becomes
if (gotString == "close_me") {
channel.foreach(_.eofAndEnd())
}
which will push an EOF through the enumerator and close the connection.