locust 0.9 to 1.3 Exception: No tasks defined. use the @task decorator or set the tasks property of the User - locust

I have the following code, which ran fine in Locust 0.9. Now, with 1.3, it throws the exception mentioned in the title. Can anyone see what's wrong?
import time
import random
import datetime
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import logging
import json
import os
from random import randint, choice
from locust import HttpUser, TaskSet, task
from pyquery import PyQuery

requests.packages.urllib3.disable_warnings()


class FrontPage(TaskSet):
    def on_start(self):
        self.client.verify = False

    @task(20)
    def index(self):
        self.client.get("/")


class DestinationPagesFixed(TaskSet):
    de_paths = ["/belgien", "daenemark", "deutschland", "frankreich", "griechenland"
                , "italien"
                , "luxemburg"
                ]

    def on_start(self):
        self.client.verify = False

    @task
    def test_1(self):
        paths = self.de_paths
        path = choice(paths)
        self.client.get(path, name="Static page")


class UserBehavior(TaskSet):
    tasks = {FrontPage: 15, DestinationPagesFixed: 19}


class WebsiteUser(HttpUser):
    task_set = UserBehavior
    min_wait = 400
    max_wait = 10000

Change
task_set = UserBehavior
to
tasks = [UserBehavior]
Or (skipping your UserBehavior class entirely)
tasks = {FrontPage: 15, DestinationPagesFixed: 19}
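For reference, here is a minimal sketch of the whole user class after that change, assuming Locust 1.x. The wait_time = between(...) line is not part of the original answer; it is the newer replacement for the deprecated min_wait/max_wait attributes (which were in milliseconds), so between(0.4, 10) keeps roughly the same pacing.

    from locust import HttpUser, between

    class WebsiteUser(HttpUser):
        # Weighted TaskSets replace the old task_set = UserBehavior indirection
        tasks = {FrontPage: 15, DestinationPagesFixed: 19}
        # between() takes seconds; roughly the old min_wait = 400 / max_wait = 10000 (ms)
        wait_time = between(0.4, 10)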

Related

app celery task depends on BaseClass in Celery

I want to create a base class for all tasks in Celery. My code is:
tasks/all/test.py
    from celery import app
    from base.main import CeleryMain

    @app.task(time_limit=10)
    def test():
        task = CeleryMain.delay()
        return task
base/main.py
    from celery import app
    import requests
    from celery import Celery, Task

    class CeleryMain(app.Task):
        abstract = True

        def run(self, task):
            data = task.apply_async()
            s = data.get(timeout=10, interval=0.01)
            return {'success': True, 'data': s}

    task = CeleryMain()
    app.register_task(task)
    task.delay()
celery.py
    imports = (
        'tasks.all.test',
    )
I get this error:
celery.exceptions.NotRegistered: 'tasks.all.test'
Please help me understand how to create a base class for all tasks correctly, without having to repeat the mistake-prone setup for each task.
CeleryMain = app.register_task(CeleryMain())
task = CeleryMain.run(task)
print(task.id)
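As a side note (this pattern is not part of the original answer), the approach usually shown in the Celery docs for sharing behaviour across tasks is to pass the base class through the base= argument of the task decorator instead of registering instances by hand. A minimal sketch, assuming a hypothetical Celery instance named celery_app and a hypothetical broker URL:

    from celery import Celery, Task

    celery_app = Celery('tasks', broker='redis://localhost:6379/0')  # hypothetical broker URL

    class BaseTask(Task):
        """Shared behaviour for every task that declares base=BaseTask."""

        def on_failure(self, exc, task_id, args, kwargs, einfo):
            # Centralised failure handling for all tasks built on this base class.
            print(f"Task {task_id} failed: {exc}")

    @celery_app.task(base=BaseTask, bind=True, time_limit=10)
    def test(self):
        # bind=True makes `self` the task instance, so BaseTask attributes are reachable here.
        return {'success': True}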

Error in running Apache Beam Python SplittableDoFn

Error encountered while trying Pub/Sub IO with a splittable DoFn:
RuntimeError: Transform node
AppliedPTransform(ParDo(TestDoFn)/ProcessKeyedElements/GroupByKey/GroupByKey,
_GroupByKeyOnly) was not replaced as expected.
Can someone help me review the code for anything I might be doing incorrectly in there?
Code:
"""
python examples/test_restriction_unbounded.py --project mk2 --topic projects/mk2/topics/testing
"""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import logging
import sys
import time
from datetime import datetime
import apache_beam as beam
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.io.restriction_trackers import OffsetRestrictionTracker, OffsetRange
from apache_beam.transforms.core import RestrictionProvider
class TestProvider(RestrictionProvider):
def initial_restriction(self, element):
return OffsetRange(0, 1)
def create_tracker(self, restriction):
return OffsetRestrictionTracker(restriction)
def restriction_size(self, element, restriction):
return restriction.size()
class TestDoFn(beam.DoFn):
def process(
self,
element,
restriction_tracker=beam.DoFn.RestrictionParam(
TestProvider())):
import pdb; pdb.set_trace()
cur = restriction_tracker.current_restriction().start
while restriction_tracker.try_claim(cur):
return element
def run(argv=None, save_main_session=True):
parser = argparse.ArgumentParser()
parser.add_argument('--topic', type=str, help='Pub/Sub topic to read from')
args, pipeline_args = parser.parse_known_args(argv)
options = PipelineOptions(pipeline_args)
options.view_as(StandardOptions).streaming = True
with beam.Pipeline(options=options) as p:
# data = ['abc', 'defghijklmno', 'pqrstuv', 'wxyz']
# actual = (p | beam.Create(data) | beam.ParDo(ExpandingStringsDoFn()))
scores = p | beam.io.ReadFromPubSub(topic=args.topic) | beam.ParDo(TestDoFn())
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
You are ingesting data from Pub/Sub by streaming. You therefore have to create batches by window before applying this kind of transform: (ParDo(TestDoFn)/ProcessKeyedElements/GroupByKey/GroupByKey, _GroupByKeyOnly)
Pub/Sub with window example: https://cloud.google.com/pubsub/docs/pubsub-dataflow
Try something like this:
# Note: window.FixedWindows needs this import, which is missing from the snippet as posted:
from apache_beam import window


class GroupWindowsIntoBatches(beam.PTransform):
    """A composite transform that groups Pub/Sub messages."""

    def __init__(self, window_size):
        # Convert minutes into seconds.
        self.window_size = int(window_size * 60)

    def expand(self, pcoll):
        return (
            pcoll
            # Assigns window info to each Pub/Sub message based on its
            # publish timestamp.
            | "Window into Fixed Intervals"
            >> beam.WindowInto(window.FixedWindows(self.window_size))
        )


def run(argv=None, save_main_session=True):
    parser = argparse.ArgumentParser()
    parser.add_argument('--topic', type=str, help='Pub/Sub topic to read from')
    args, pipeline_args = parser.parse_known_args(argv)
    options = PipelineOptions(pipeline_args)
    options.view_as(StandardOptions).streaming = True
    window_size = 1.0
    with beam.Pipeline(options=options) as p:
        scores = (p
                  | beam.io.ReadFromPubSub(topic=args.topic)
                  | "WindowInto" >> GroupWindowsIntoBatches(window_size)
                  | beam.ParDo(TestDoFn())
                  )
I had the same error. Removing the streaming option solved the problem for me.
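To illustrate what dropping the streaming option can look like (only a sketch, not taken from the answers above): reuse the bounded input that is commented out in the question instead of Pub/Sub, which is an unbounded source. The run_batch name is made up for this example.

    def run_batch(argv=None):
        # Batch variant: no StandardOptions.streaming flag, bounded input via beam.Create.
        options = PipelineOptions(argv)
        data = ['abc', 'defghijklmno', 'pqrstuv', 'wxyz']
        with beam.Pipeline(options=options) as p:
            _ = (p
                 | beam.Create(data)
                 | beam.ParDo(TestDoFn()))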

Why "missing parameter type error" when i run scala REPL in Flink with Java?

When I run a script in the Flink Scala REPL from Java, it does not compile.
I tried the following Java code to run the Flink Scala REPL as a test, but it always throws an exception.
Settings settings = new Settings();
((MutableSettings.BooleanSetting) settings.usejavacp()).value_$eq(true);
IMain main = new IMain(settings, new PrintWriter(System.out));
// Thread.currentThread().setContextClassLoader(main.classLoader());
for (String imp : imports) {
    main.interpret(MessageFormat.format("import {0}", imp));
}
ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
String script = FileUtils.readFileToString(new File("/opt/project/security-detection/sappo/src/sappo-interpreter/src/test/resources/demo.txt"), StandardCharsets.UTF_8);
main.bind(new NamedParamClass("env", ExecutionEnvironment.class.getName(), env));
main.interpret(script);
The Scala script (demo.txt):
val text = env.fromElements("Who's there?", "I think I hear them. Stand, ho! Who's there?")
// result 1
val counts = text.flatMap { _.toLowerCase.split("\\W+") filter { _.nonEmpty } } map { (_, 1) } groupBy(0) sum(1)
counts.print()
// result 2
val counts = text.map((x:String) => 1)
counts.print()
// result 3
text.print()
result 1
import org.apache.flink.core.fs._
import org.apache.flink.core.fs.local._
import org.apache.flink.api.common.io._
import org.apache.flink.api.common.aggregators._
import org.apache.flink.api.common.accumulators._
import org.apache.flink.api.common.distributions._
import org.apache.flink.api.common.operators._
import org.apache.flink.api.common.operators.base.JoinOperatorBase.JoinHint
import org.apache.flink.api.common.functions._
import org.apache.flink.api.java.io._
import org.apache.flink.api.java.aggregation._
import org.apache.flink.api.java.functions._
import org.apache.flink.api.java.operators._
import org.apache.flink.api.java.sampling._
import org.apache.flink.api.scala._
import org.apache.flink.api.scala.utils._
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.windowing.time._
env: org.apache.flink.api.java.ExecutionEnvironment = Local Environment (parallelism = 8) : ee335d29eefca69ee5fe7279414fc534
console:67: error: missing parameter type for expanded function ((x$1) => x$1.toLowerCase.split("\\W+").filter(((x$2) => x$2.nonEmpty)))
val counts = text.flatMap { _.toLowerCase.split("\\W+") filter { _.nonEmpty } } map { (_, 1) } groupBy(0) sum(1)
result 2
import org.apache.flink.core.fs._
import org.apache.flink.core.fs.local._
import org.apache.flink.api.common.io._
import org.apache.flink.api.common.aggregators._
import org.apache.flink.api.common.accumulators._
import org.apache.flink.api.common.distributions._
import org.apache.flink.api.common.operators._
import org.apache.flink.api.common.operators.base.JoinOperatorBase.JoinHint
import org.apache.flink.api.common.functions._
import org.apache.flink.api.java.io._
import org.apache.flink.api.java.aggregation._
import org.apache.flink.api.java.functions._
import org.apache.flink.api.java.operators._
import org.apache.flink.api.java.sampling._
import org.apache.flink.api.scala._
import org.apache.flink.api.scala.utils._
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.windowing.time._
env: org.apache.flink.api.java.ExecutionEnvironment = Local Environment (parallelism = 8) : 5cbf8e476ebf32fd8fdf91766bd40af0
console:71: error: type mismatch;
found : String => Int
required: org.apache.flink.api.common.functions.MapFunction[String,?]
val counts = text.map((x:String) => 1)
result 3
import org.apache.flink.core.fs._
import org.apache.flink.core.fs.local._
import org.apache.flink.api.common.io._
import org.apache.flink.api.common.aggregators._
import org.apache.flink.api.common.accumulators._
import org.apache.flink.api.common.distributions._
import org.apache.flink.api.common.operators._
import org.apache.flink.api.common.operators.base.JoinOperatorBase.JoinHint
import org.apache.flink.api.common.functions._
import org.apache.flink.api.java.io._
import org.apache.flink.api.java.aggregation._
import org.apache.flink.api.java.functions._
import org.apache.flink.api.java.operators._
import org.apache.flink.api.java.sampling._
import org.apache.flink.api.scala._
import org.apache.flink.api.scala.utils._
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.windowing.time._
env: org.apache.flink.api.java.ExecutionEnvironment = Local Environment (parallelism = 8) : ee335d29eefca69ee5fe7279414fc534
Who's there?
I think I hear them. Stand, ho! Who's there?
text: org.apache.flink.api.java.operators.DataSource[String] = org.apache.flink.api.java.operators.DataSource#53e28097
PASSED: testIMain
PASSED: testIMainScript
Try using the Scala REPL that comes with Flink:
$ bin/start-scala-shell.sh local
I tried the three examples you shared (with Flink 1.7.0), and they all worked just fine.

Getting error while trying to insert data into MongoDB

I am trying to insert data into MongoDB using Play-scala and ReactiveMongo.
Here is my DbimpService.scala:
package services

import models.Post
import reactivemongo.bson.BSONDocument
import reactivemongo.api.MongoDriver
import reactivemongo.api.collections.bson.BSONCollection
import scala.concurrent.ExecutionContext
import javax.inject.Inject
import play.api.libs.json.Json
import reactivemongo.play.json.collection.JSONCollection
import reactivemongo.api.commands.WriteResult
import scala.concurrent.Future
import org.apache.xerces.util.DatatypeMessageFormatter

class Dbimpservice @Inject() (implicit ec: ExecutionContext) extends Dbservice {
  def create(p: Post): String = {
    var status = "Not Saved"
    val driver = new MongoDriver
    val connection = driver.connection(List("localhost"))
    val db = connection("application")
    val collection = db[BSONCollection]("post")
    val futureList = collection.insert[Post](p)
    futureList.onComplete { case sucess => println(sucess) }
    return status
  }
}
Here is my HomeController.scala:
package controllers

import javax.inject._
import play.api._
import play.api.mvc._
import models._
import scala.util.{ Failure, Success }
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import reactivemongo.api.{ MongoDriver, MongoConnection }
import reactivemongo.play.json.collection.JSONCollection
import reactivemongo.bson.BSONDocument
import reactivemongo.api.commands.WriteResult
import reactivemongo.api.collections.bson.BSONCollection
import play.api.libs.json.Json
import services.Dbservice
import services.Dbimpservice
import services.Dbservice
import scala.concurrent.ExecutionContext
import scala.concurrent.Await
import scala.concurrent.duration.Duration

/**
 * This controller creates an `Action` to handle HTTP requests to the
 * application's home page.
 */
@Singleton
class HomeController @Inject() (implicit ec: ExecutionContext, val Dbservice: Dbimpservice) extends Controller {

  /**
   * Create an Action to render an HTML page with a welcome message.
   * The configuration in the `routes` file means that this method
   * will be called when the application receives a `GET` request with
   * a path of `/`.
   */
  def index = Action {
    Ok("Hai")
  }

  def read = Action.async {
    val query = BSONDocument()
    val driver = new MongoDriver
    val connection = driver.connection(List("localhost:27017"))
    val db = connection("application")
    val collection = db[BSONCollection]("post")
    val futureList = collection.find(query).cursor[Post]().collect[List]()
    futureList.map { list =>
      Ok(list.toString())
    }
  }

  def create = Action(BodyParsers.parse.json) { request =>
    val personResult = request.body.validate[Post]
    personResult.fold(
      errors => {
        BadRequest(Json.obj("status " -> "ERROR"))
      },
      valid = fun
    )
  }

  def fun: Post => Result = { post =>
    var ans = Dbservice.create(post)
    Ok(ans)
  }
}
I am trying to insert the data, but nothing gets inserted, and the error I am getting is:
Failure(reactivemongo.core.errors.ConnectionNotInitialized: MongoError['Connection is missing metadata (like protocol version, etc.) The connection pool is probably being initialized.'])
Someone please help me. I even referred to the link
http://stackoverflow.com/questions/31456517/embedmongo-with-reactivemongo-process-does-not-exit
but it did not help.
Guessing that you are using a recent version of ReactiveMongo (0.11.7+), you are using the deprecated DB resolution code (connection(dbName), aka connection.apply(dbName)).
You need to use the asynchronous resolution, which benefits from the failover handling (to cope with possible network latency/incidents). The following code must therefore be refactored.
val db = connection("application")
val collection = db[BSONCollection]("post")
val futureList = collection.insert[Post](p)
Using the new DB resolution:
for {
  db <- connection.database("application")
  collection = db("post")
  res <- collection.insert(p)
} yield res

Spray cache for get service?

I am using cache headers like no-cache and no-store, and I don't know how to do application-level caching (maybe I need some documentation here).
I print the data result to the console when I call the method on MongoDB, but it only works once after I run my app (this happens in the GET service). The second time my app doesn't print anything, that is, it doesn't call the method. That happens when I try to get the list of users a second time, for example after a POST that inserts a new user. I need to see the changes to the DB in the frontend, but my app seems to get the data from the cache and doesn't call the method to fetch the users again.
The code I use in my Spray Scala service is:
package api

import spray.routing.Directives
import akka.actor.ActorRef
import spray.http.MediaTypes._
import core.UserRegister
import core.User
import scala.concurrent.ExecutionContext
import core.{User, LoginActor}
import akka.util.Timeout
import LoginActor._
import spray.http._
import scala.Some
import spray.json.JsonFormat
import spray.json.RootJsonFormat
import spray.json.JsArray
import spray.json.CollectionFormats
import spray.json._
import DefaultJsonProtocol._
import scala.util.parsing.json.JSONArray
import scala.util.parsing.json._
import data.UserDao
import core.UserListActor
import spray.routing.Route
import core.CoreActors
import spray.routing.HttpService
import core.Core
import akka.actor.{Props, ActorRefFactory, ActorSystem}
import akka.actor.ActorContext
import spray.routing.HttpServiceActor
import spray.http.HttpHeaders.RawHeader
import scala.concurrent.duration.Duration
import spray.routing.authentication.BasicAuth
import spray.routing.directives.CachingDirectives._
import spray.httpx.encoding._
import spray.caching._
import spray.caching.{LruCache, Cache}
import spray.caching.Cache
import web.StaticResources
import scala.concurrent.Future

class ListarUsuarioService(listaUsuario: ActorRef)(implicit executionContext: ExecutionContext)
  extends Directives with DefaultJsonFormats with SprayCORSsupport with CORSSupport {

  import akka.pattern.ask
  import scala.concurrent.duration._

  implicit val userFormat = jsonFormat2(User)
  implicit val registerFormat = jsonFormat1(Register)
  implicit val userRegisterFormat = jsonFormat5(UserRegister)
  implicit val registeredFormat = jsonObjectFormat[Registered.type]
  implicit val notRegisteredFormat = jsonObjectFormat[NotRegistered.type]
  implicit val system = ActorSystem()
  import system.dispatcher

  lazy val simpleRouteCache = routeCache()
  lazy val simpleCache = routeCache(maxCapacity = 5000, timeToIdle = 0.001 hour)
  //lazy val cache = LruCache()

  def routeCache(maxCapacity: Int = 2000, initialCapacity: Int = 100, timeToLive: Duration = 5 seconds,
                 timeToIdle: Duration = Duration.Inf): Cache[RouteResponse] =
    LruCache(maxCapacity, initialCapacity, timeToLive, timeToIdle)

  // and a Cache for its result type
  val cache2: Cache[Double] = LruCache()

  val listaUsuariosroute: Route =
    cache(routeCache()) {
      cors {
        addCORSDefaultSupport() {
          path("usuario") {
            get {
              respondWithMediaType(`application/json`) {
                _.complete {
                  // Element of the list
                  //listaUsuarios(1)
                  UserListActor.listaUsuarios.toJson.convertTo[JsArray].prettyPrint
                }
              }
            }
          }
        }
      } //cors
    }
}
I am using CORS and cache headers like no-store, public and no-cache, but it doesn't work; even when I clear my browser cache it still doesn't work.