I am trying to reproduce the Devign experiment. Since Joern was updated, the graph-for-funcs.sc script that parses the .bin file into JSON produces error messages. I modified parts of it, but some errors remain, as shown below. Did you encounter similar errors, and how did you solve them?
graph-for-funcs.sc
import scala.jdk.CollectionConverters._
import io.circe.syntax._
import io.circe.generic.semiauto._
import io.circe.{Encoder, Json}
import io.shiftleft.semanticcpg.language.types.expressions.generalizations.CfgNode
import io.shiftleft.codepropertygraph.generated.EdgeTypes
import io.shiftleft.codepropertygraph.generated.NodeTypes
import io.shiftleft.codepropertygraph.generated.nodes
import io.shiftleft.dataflowengineoss.language._
import io.shiftleft.semanticcpg.language._
import io.shiftleft.semanticcpg.language.types.expressions.Call
import io.shiftleft.semanticcpg.language.types.structure.Local
import io.shiftleft.codepropertygraph.generated.nodes.MethodParameterIn
import overflowdb._
import overflowdb.traversal._
final case class GraphForFuncsFunction(function: String,
file: String,
id: String,
AST: List[nodes.AstNode],
CFG: List[nodes.AstNode],
PDG: List[nodes.AstNode])
final case class GraphForFuncsResult(functions: List[GraphForFuncsFunction])
implicit val encodeEdge: Encoder[Edge] =
(edge: Edge) =>
Json.obj(
("id", Json.fromString(edge.toString)),
("in", Json.fromString(edge.inNode.toString)),
("out", Json.fromString(edge.outNode.toString))
)
implicit val encodeNode: Encoder[nodes.AstNode] =
(node: nodes.AstNode) =>
Json.obj(
("id", Json.fromString(node.toString)),
("edges",
Json.fromValues((node.inE("AST", "CFG").l ++ node.outE("AST", "CFG").l).map(_.asJson))),
("properties", Json.fromValues(node.propertyMap.asScala.toList.map { case (key, value) =>
Json.obj(
("key", Json.fromString(key)),
("value", Json.fromString(value.toString))
)
}))
)
implicit val encodeFuncFunction: Encoder[GraphForFuncsFunction] = deriveEncoder
implicit val encodeFuncResult: Encoder[GraphForFuncsResult] = deriveEncoder
@main def main(): Json = {
GraphForFuncsResult(
cpg.method.map { method =>
val methodName = method.fullName
val methodId = method.toString
val methodFile = method.location.filename
val astChildren = method.astMinusRoot.l
val cfgChildren = method.out(EdgeTypes.CONTAINS).asScala.collect { case node: nodes.CfgNode => node }.toList
val local = new NodeSteps(
method
.out(EdgeTypes.CONTAINS)
.hasLabel(NodeTypes.BLOCK)
.out(EdgeTypes.AST)
.hasLabel(NodeTypes.LOCAL)
.cast[nodes.Local])
val sink = local.evalType(".*").referencingIdentifiers.dedup
val source = new NodeSteps(method.out(EdgeTypes.CONTAINS).hasLabel(NodeTypes.CALL).cast[nodes.Call]).nameNot("<operator>.*").dedup
val pdgChildren = sink
.reachableByFlows(source)
.l
.flatMap { path =>
path.elements
.map {
case trackingPoint @ (_: MethodParameterIn) => trackingPoint.start.method.head
case trackingPoint => trackingPoint.cfgNode
}
}
.filter(_.toString != methodId)
GraphForFuncsFunction(methodName, methodFile, methodId, astChildren, cfgChildren, pdgChildren.distinct)
}.l
).asJson
}
error
graph-for-funcs.sc:92: value evalType is not a member of io.shiftleft.semanticcpg.language.NodeSteps[io.shiftleft.codepropertygraph.generated.nodes.Local]
val sink = local.evalType(".*").referencingIdentifiers.dedup
^
graph-for-funcs.sc:93: value nameNot is not a member of io.shiftleft.semanticcpg.language.NodeSteps[io.shiftleft.codepropertygraph.generated.nodes.Call]
val source = new NodeSteps(method.out(EdgeTypes.CONTAINS).hasLabel(NodeTypes.CALL).cast[nodes.Call]).nameNot("<operator>.*").dedup
^
java.lang.RuntimeException: Compilation Failed
io.shiftleft.console.scripting.AmmoniteExecutor.$anonfun$runScript$7(AmmoniteExecutor.scala:50)
cats.effect.internals.IORunLoop$.liftedTree3$1(IORunLoop.scala:229)
cats.effect.internals.IORunLoop$.step(IORunLoop.scala:229)
cats.effect.IO.unsafeRunTimed(IO.scala:320)
cats.effect.IO.unsafeRunSync(IO.scala:239)
io.shiftleft.console.scripting.ScriptManager.runScript(ScriptManager.scala:130)
io.shiftleft.console.scripting.ScriptManager$CpgScriptRunner.runScript(ScriptManager.scala:64)
io.shiftleft.console.scripting.ScriptManager$CpgScriptRunner.runScript(ScriptManager.scala:54)
ammonite.$sess.cmd8$.<clinit>(cmd8.sc:1)
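From the error messages my guess is that in the updated Joern these steps (evalType, referencingIdentifiers, nameNot) are now defined as extension methods on the overflowdb Traversal rather than on NodeSteps, so wrapping the traversal in new NodeSteps(...) hides them. A sketch of what I think the sink/source part inside cpg.method.map would become, just dropping the wrappers (not verified against the current Joern API):
// Sketch only: keep the bare Traversal instead of wrapping it in new NodeSteps(...).
val local = method
.out(EdgeTypes.CONTAINS)
.hasLabel(NodeTypes.BLOCK)
.out(EdgeTypes.AST)
.hasLabel(NodeTypes.LOCAL)
.cast[nodes.Local]
val sink = local.evalType(".*").referencingIdentifiers.dedup
val source = method
.out(EdgeTypes.CONTAINS)
.hasLabel(NodeTypes.CALL)
.cast[nodes.Call]
.nameNot("<operator>.*")
.dedup
If that still doesn't compile, comparing against the graph-for-funcs.sc that ships with the Joern version in use is probably the safer route.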
Related
I have these 2 methods:
import org.h2.store.fs.FilePath
import zio.*
import zio.Console.printLine
import zio.http.Client
import zio.nio.file.*
import zio.nio.charset.Charset
import zio.stream.*
import java.io.IOException
object FileStorage:
def saveToFile(data: String = "", filePath: String = "src/main/resources/data.json"): Unit =
lazy val logic = for {
encoded <- Charset.Standard.utf8.encodeString(data)
path = Path(filePath.split("/").head, filePath.split("/").tail: _*)
notExists <- Files.notExists(path)
_ <- if (notExists) Files.createFile(path) else ZIO.attempt(())
_ <- Files.writeBytes(path, encoded)
_ <- Console.printLine(s"written to $path")
} yield ()
def unsafeF = (unsafeVal: Unsafe) => {
implicit val unsafe: Unsafe = unsafeVal
Runtime.default.unsafe.run(logic)
}
Unsafe.unsafe(unsafeF)
def readFromFile: ZIO[Any, Throwable, String] = {
val path = Path("src", "main", "resources", "data.json")
val bool = for bool <- Files.isReadable(path) yield bool
val zioStr = bool.flatMap(bool =>
if (bool) Files.readAllLines(path, Charset.Standard.utf8).map(fileLines => fileLines.head)
else {
saveToFile()
readFromFile})
zioStr
}
In def readFromFile I try to create an empty file if the file doesn't exist.
File generation works fine.
Then I try to read that empty file and return it as a ZIO Response, like this:
import zio.http.{Client, *}
import zio.json.*
import zio.http.model.Method
import zio.{Scope, Task, ZIO, ZIOAppDefault}
import zio.http.Client
import zhttp.http.Status.NotFound
import zhttp.http.Status
import scala.language.postfixOps
import zio._
import scala.collection.immutable.List
import zio.{ExitCode, URIO, ZIO}
object ClientServer extends ZIOAppDefault {
val app: Http[Client, Throwable, Request, Response] = Http.collectZIO[Request] {
case Method.GET -> !! / "readLeagues" =>
FileStorage.readFromFile.map(str => Response.json(str))
}
But in this case I get an Internal Server Error 500 in Postman at http://localhost:8080/readLeagues.
If I first feed a prefilled JSON file to def readFromFile, it works fine: Status 200, and I get a nice-looking JSON body.
Maybe I should set a different default string for data in def saveToFile, so that the JSON is parsable? Or something else?
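Here is a minimal sketch of the prefill idea I mean: it assumes the 500 comes from calling .head on the freshly created empty file, and uses "[]" purely as a guess at a parsable default (inside FileStorage, with saveToFile's default data changed to "[]" as well):
// Sketch only: "[]" as default content is an assumption, not a requirement.
def readFromFile: ZIO[Any, Throwable, String] = {
val path = Path("src", "main", "resources", "data.json")
for {
readable <- Files.isReadable(path)
lines <- if (readable) Files.readAllLines(path, Charset.Standard.utf8)
else ZIO.attempt(saveToFile(data = "[]")).as(List.empty[String])
} yield lines.headOption.getOrElse("[]") // empty file -> empty JSON array instead of .head failing
}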
I'm writing an app using Scala 2.13 with Akka HTTP 10.2.4 and Akka Stream 2.6.15. I'm trying to query a web service in a parallel manner, like so:
package com.example
import akka.actor.typed.scaladsl.ActorContext
import akka.http.scaladsl.Http
import akka.http.scaladsl.client.RequestBuilding.Get
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.scaladsl.{Flow, JsonFraming, Sink, Source}
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import spray.json.DefaultJsonProtocol
import spray.json.DefaultJsonProtocol.jsonFormat2
import scala.util.Try
case class ClientStockPortfolio(id: Long, symbol: String)
case class StockTicker(symbol: String, price: Double)
trait SprayFormat extends SprayJsonSupport with DefaultJsonProtocol {
implicit val stockTickerFormat = jsonFormat2(StockTicker)
}
class StockTrader(context: ActorContext[_]) extends SprayFormat {
implicit val system = context.system.classicSystem
val httpPool = Http().superPool[Seq[ClientStockPortfolio]]()
def collectPrices() = {
val src = Source(Seq(
ClientStockPortfolio(1, "GOOG"),
ClientStockPortfolio(2, "AMZN"),
ClientStockPortfolio(3, "MSFT")
)
)
val graph = src
.groupBy(8, _.id % 8)
.via(createPost)
.via(httpPool)
.via(decodeTicker)
.mergeSubstreamsWithParallelism(8)
.to(
Sink.fold(0.0) { (totalPrice, ticker) =>
insertIntoDatabase(ticker)
totalPrice + ticker.price
}
)
graph.run()
}
def createPost = Flow[ClientStockPortfolio]
.grouped(10)
.map { port =>
(
Get(uri = s"http://wherever/?symbols=${port.map(_.symbol).mkString(",")}"),
port
)
}
def decodeTicker = Flow[(Try[HttpResponse], Seq[ClientStockPortfolio])]
.flatMapConcat { x =>
x._1.get.entity.dataBytes
.via(JsonFraming.objectScanner(Int.MaxValue))
.mapAsync(4)(bytes => Unmarshal(bytes).to[StockTicker])
.mapConcat { ticker =>
lookupPreviousPrices(ticker)
}
}
def lookupPreviousPrices(ticker: StockTicker): List[StockTicker] = ???
def insertIntoDatabase(ticker: StockTicker) = ???
}
I have two questions. First, will the groupBy call that splits the stream into substreams run them in parallel like I want? And second, when I call this code, I run into the max-open-requests error, since I haven't increased the setting from the default. But even if I am running in parallel, I'm only running 8 threads - how is the Http().superPool() getting backed up with 32 requests?
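For context, the 32 seems to match Akka HTTP's default akka.http.host-connection-pool.max-open-requests, which applies to the shared pool behind Http().superPool() rather than to the 8 substreams. A rough sketch of how I could raise it while experimenting (values are illustrative; in the typed setup these settings would normally live in application.conf):
// Illustrative only: numbers and names are examples, not recommendations.
import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory

val poolConfig = ConfigFactory.parseString(
"""
|akka.http.host-connection-pool {
|  max-open-requests = 64  // must be a power of 2; default is 32
|  max-connections = 8     // default is 4
|}
|""".stripMargin
).withFallback(ConfigFactory.load())

implicit val system: ActorSystem = ActorSystem("stock-trader", poolConfig) // system name is illustrative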
I'm working to get this code running in Databricks notebooks (it is already tested and working in an IDE), but I can't get it working if I change the structure of the code.
import java.io.{BufferedReader, File, InputStreamReader, PrintWriter}
import java.text.SimpleDateFormat
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
object TestUnit {
val dateFormat = new SimpleDateFormat("yyyyMMdd")
case class Averages (cust: String, Num: String, date: String, credit: Double)
def main(args: Array[String]): Unit = {
val inputFile = "s3a://tfsdl-ghd-wb/raidnd/Cleartablet.csv"
val outputFile = "s3a://tfsdl-ghd-wb/raidnd/Incte_19&20.csv"
val fileSystem = getFileSystem(inputFile)
val inputData = readCSVFileLines(fileSystem, inputFile, skipHeader = true)
.toSeq
val filtinp = inputData.filter(x => x.nonEmpty)
.map(x => x.split(","))
.map(x => Averages(x(6), x(5), x(0), x(8).toDouble))
// Create output writer
val writer = new PrintWriter(new File(outputFile))
// Header for output CSV file
writer.write("Date,customer,number,Credit,Average Credit/SKU\n")
filtinp.foreach { x =>
val (com1, avg1) = com1Average(filtinp, x)
val (com2, avg2) = com2Average(filtinp, x)
// Write row to output csv file
writer.write(s"${x.date},${x.cust},${x.Num},${x.credit},$avg1,$avg2\n")
}
writer.close() // close the writer
}
}
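For completeness, the code calls getFileSystem and readCSVFileLines, which aren't shown above; this is roughly what they look like, reconstructed from the call sites and the Hadoop imports (a sketch, not the exact originals):
// Assumed helpers, inferred from the call sites above.
def getFileSystem(path: String): FileSystem = {
val conf = new Configuration()
new Path(path).getFileSystem(conf)
}

def readCSVFileLines(fs: FileSystem, path: String, skipHeader: Boolean): Iterator[String] = {
val reader = new BufferedReader(new InputStreamReader(fs.open(new Path(path))))
val lines = Iterator.continually(reader.readLine()).takeWhile(_ != null)
if (skipHeader) lines.drop(1) else lines
}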
I have been playing around with classes, but I continuously get the error above when I try to implement them. This is my code:
import org.apache.spark.graphx._
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
class EdgeProperties()
case class WriterWriterProperties(weight: String, edgeType: String) extends EdgeProperties
object GraphXAnalysis2 {
val edgeWeightedWriterWriterCollaborated = "in/Graphs/Graph4_WriterWriter/EdgesWeightedWriterWriter_writerscollaborated.csv"
val vertexWriterWriter = "in/Graphs/Graph4_WriterWriter/Vertices.csv"
val conf = new SparkConf().setAppName("Music Graph Application").setMaster("local[1]")
val sc = new SparkContext(conf)
val WriterWriter: RDD[(VertexId, String)] = sc.textFile(vertexWriterWriter).map {
line =>
val row = line.split(",")
(row(0).toLong, row(2))
}
val edgesWriterWriterCollaborated: RDD[Edge[EdgeProperties]] = sc.textFile(edgeWeightedWriterWriterCollaborated).map {
line =>
val row = line.split(",")
Edge(row(0).toLong, row(1).toLong, WriterWriterProperties(row(2), row(3)): EdgeProperties)
}
val graph4 = Graph(WriterWriter, edgesWriterWriterCollaborated)
}
Am I declaring the class wrongly, using it incorrectly, or putting it in the wrong place? Thank you so much, as I am completely new to this.
This is my idea
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import java.io.File
object pizD {
def filePath = {
new File(this.getClass.getClassLoader.getResource("wikipedia/wikipedia.dat").toURI).getPath
}
def regex(line: String): pichA = {
......
......
pichA(t1, t2)
}
}
case class pichA(t1: String, t2: String)
object dushP {
val conf = new SparkConf()
val sc = new SparkContext(conf)
val mirdd: RDD[pichA] = ???
How do I integrate sc.textFile with my methods filePath and regex? I want to combine them in order to get a new RDD.
val baseRDD = sc.textFile(pizD.filePath).filter(line => {
val value = pizD.regex(line)
if (value != null)
true
else false
})
This assumes that pizD.filePath gives you the file name as a string and that regex() returns null when the regex doesn't match. If that understanding is correct, then the above code should do the trick.
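To actually end up with the RDD[pichA] declared as mirdd, you could also map each line through regex instead of only filtering; a sketch under the same null-on-no-match assumption:
// Parse each line once and drop the ones where regex() returned null.
val mirdd: RDD[pichA] =
sc.textFile(pizD.filePath)
.flatMap(line => Option(pizD.regex(line))) // None when the regex didn't match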