Consider this example code:
import scala.swing._
import scala.swing.Swing._
import javax.swing.JTable
import javax.swing.table.AbstractTableModel

class MyTable(columnNames: Seq[String], model: Seq[Seq[Any]]) extends Component {
  override lazy val peer = new JTable(new AbstractTableModel {
    def getValueAt(row: Int, col: Int): AnyRef = model(row)(col).asInstanceOf[AnyRef]
    def getColumnCount() = columnNames.length
    def getRowCount() = model.length
    override def isCellEditable(row: Int, column: Int) = false
  })
  peer setAutoCreateRowSorter true
}
object SO extends SimpleSwingApplication {
  implicit def tableRowData2Array[T](rowData: Seq[Seq[T]]): Array[Array[Any]] =
    rowData.map(_.toArray[Any]).toArray

  val rowData = Seq(Seq("1"), Seq("2"), Seq("3"))
  val columnNames = Seq("Nr")

  def top = new MainFrame {
    title = "TableTest"
    val scalaTable = new Table(rowData, columnNames) {
      peer setAutoCreateRowSorter true
    }
    val myTable = new MyTable(columnNames, rowData)
    contents = new BoxPanel(Orientation.Horizontal) {
      contents += new ScrollPane(scalaTable)
      contents += new ScrollPane(myTable)
    }
  }
}
Why are the columns in scalaTable not sortable when clicking on the column name, while the columns in myTable are?
And how can I use scala.swing.Table with sortable columns instead of reimplementing it with MyTable?
See my answer to my own question at Using TableRowSorter with scala.swing.Table. The Java 6 table-sorting feature isn't implemented in scala.swing.Table; the code for it is commented out.
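Until then, a minimal workaround sketch (reusing rowData and columnNames from the question; TableRowSorter and setRowSorter are plain javax.swing API available since Java 6):

import javax.swing.table.{TableModel, TableRowSorter}

val sortableTable = new Table(rowData, columnNames) {
  // attach a row sorter to the underlying JTable directly
  peer.setRowSorter(new TableRowSorter[TableModel](peer.getModel))
}

Since sorting support is exactly the part that is commented out in scala.swing.Table, treat this as a sketch: the stock renderers may not remap view rows to model rows after sorting.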
I am trying to write an application using JavaFX and Scala (not ScalaFX). When I tried out this example from http://tutorials.jenkov.com/javafx/treetableview.html (Add TreeTableColumn to TreeTableView), I got a "Cannot resolve overloaded method 'add'" error on the last two lines. I was wondering if you can help me get past this issue.
import javafx.application.Application
import javafx.scene.{Group, Scene}
import javafx.stage.Stage

class Phase1 extends Application {
  import javafx.scene.control.TreeTableColumn
  import javafx.scene.control.TreeTableView
  import javafx.scene.control.cell.TreeItemPropertyValueFactory

  override def start(primaryStage: Stage): Unit = {
    primaryStage.setTitle("Experimental Blocking Tree")
    val scene = new Scene(new Group(), 1500, 800)
    val sceneRoot = scene.getRoot.asInstanceOf[Group]
    val treeTableView = new TreeTableView[Car] // Car: the model class from the linked tutorial
    val treeTableColumn1: TreeTableColumn[Car, String] = new TreeTableColumn[Car, String]("Brand")
    val treeTableColumn2: TreeTableColumn[Car, String] = new TreeTableColumn[Car, String]("Model")
    treeTableColumn1.setCellValueFactory(new TreeItemPropertyValueFactory[Car, String]("brand"))
    treeTableColumn2.setCellValueFactory(new TreeItemPropertyValueFactory[Car, String]("model"))
    treeTableView.getColumns.add(treeTableColumn1) // cannot resolve overloaded method here
    treeTableView.getColumns.add(treeTableColumn2) // and here
  }
}
Thanks in advance.
I had the same issue with displaying data in TreeTableView.
Jarek posted a solution here: GitHub Issue
Also this works for me:
import scalafx.beans.property.ReadOnlyStringProperty

case class Car(
  brand: ReadOnlyStringProperty,
  model: ReadOnlyStringProperty
)

class CarStringFactory(val stringValue: ReadOnlyStringProperty)
    extends scalafx.beans.value.ObservableValue[String, String] {
  override def delegate: javafx.beans.value.ObservableValue[String] = stringValue
  override def value: String = stringValue.get
}

class YourScalaFXApp {
  // ... boilerplate code ...
  import scalafx.scene.control.{TreeTableView, TreeTableColumn}

  val treeTableView = new TreeTableView[Car]

  val treeTableColumn1: TreeTableColumn[Car, String] = new TreeTableColumn[Car, String]("Brand") {
    cellValueFactory = { p => new CarStringFactory(p.value.value.value.brand) }
  }
  val treeTableColumn2: TreeTableColumn[Car, String] = new TreeTableColumn[Car, String]("Model") {
    cellValueFactory = { p => new CarStringFactory(p.value.value.value.model) }
  }

  treeTableView.getColumns.add(treeTableColumn1)
  treeTableView.getColumns.add(treeTableColumn2)
}
Refer to
ScalaFX documentation: Properties
TreeTableColumn.cellValueFactory
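If you would rather stay with the plain JavaFX API from the question, one workaround worth trying is to widen each column to the existential element type that getColumns expects, ObservableList[TreeTableColumn[Car, _]]. This is only a sketch: the overload-resolution error often comes from the IDE's presentation compiler rather than scalac, so the ascription may not even be needed for a command-line build.

// ascribe the existential type expected by getColumns before adding
val col1: TreeTableColumn[Car, _] = treeTableColumn1
val col2: TreeTableColumn[Car, _] = treeTableColumn2
treeTableView.getColumns.add(col1)
treeTableView.getColumns.add(col2)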
I am experiencing a reproducible error while producing Avro messages with reactive kafka and avro4s. Once the identityMapCapacity of the client (CachedSchemaRegistryClient) is reached, serialization fails with
java.lang.IllegalStateException: Too many schema objects created for <myTopic>-value
This is unexpected, since all messages should have the same schema - they are serializations of the same case class.
val avroProducerSettings: ProducerSettings[String, GenericRecord] =
  ProducerSettings(system, Serdes.String().serializer(), avroSerde.serializer())
    .withBootstrapServers(settings.bootstrapServer)

val avroProdFlow: Flow[ProducerMessage.Message[String, GenericRecord, String],
                       ProducerMessage.Result[String, GenericRecord, String],
                       NotUsed] = Producer.flow(avroProducerSettings)

val avroQueue: SourceQueueWithComplete[Message[String, GenericRecord, String]] =
  Source.queue(bufferSize, overflowStrategy)
    .via(avroProdFlow)
    .map(logResult)
    .to(Sink.ignore)
    .run()

...

queue.offer(msg)
The serializer is a KafkaAvroSerializer, instantiated with a new CachedSchemaRegistryClient(settings.schemaRegistry, 1000).
Generating the GenericRecord:
def toAvro[A](a: A)(implicit recordFormat: RecordFormat[A]): GenericRecord =
  recordFormat.to(a)

val makeEdgeMessage: (Edge, String) => Message[String, GenericRecord, String] = { (edge, topic) =>
  val edgeAvro: GenericRecord = toAvro(edge)
  val record = new ProducerRecord[String, GenericRecord](topic, edge.id, edgeAvro)
  ProducerMessage.Message(record, edge.id)
}
The schema is created deep in the code (io.confluent.kafka.serializers.AbstractKafkaAvroSerDe#getSchema, invoked by io.confluent.kafka.serializers.AbstractKafkaAvroSerializer#serializeImpl), where I have no influence on it, so I have no idea how to fix the leak. It looks to me like the two Confluent projects do not work well together.
The issues I have found here, here and here do not seem to address my use case.
The two workarounds currently available to me are:
not using the schema registry - obviously not a long-term solution
creating a custom SchemaRegistryClient that does not rely on object identity - doable, but I would like to avoid creating more issues than I solve by reimplementing it
Is there a way to generate or cache a consistent schema depending on message/record type and use it with my setup?
edit 2017.11.20
The issue in my case was that each instance of GenericRecord carrying my message had been serialized by a different instance of RecordFormat, each containing a different instance of the Schema. The implicit resolution here generated a new instance every time:
def toAvro[A](a: A)(implicit recordFormat: RecordFormat[A]): GenericRecord = recordFormat.to(a)
The solution was to pin the RecordFormat instance to a val and reuse it explicitly. Many thanks to https://github.com/heliocentrist for explaining the details.
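A minimal sketch of that fix (assuming avro4s's RecordFormat and the Edge type from the question):

import com.sksamuel.avro4s.RecordFormat
import org.apache.avro.generic.GenericRecord

// resolve the implicit once; every message now reuses the same Schema instance
val edgeFormat: RecordFormat[Edge] = RecordFormat[Edge]

def toAvro(edge: Edge): GenericRecord = edgeFormat.to(edge)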
original response:
After waiting for a while (and receiving no answer to the GitHub issue either), I had to implement my own SchemaRegistryClient. Over 90% of it is copied from the original CachedSchemaRegistryClient, just translated into Scala. Replacing the identity-based caches with Scala's equality-based mutable.Map fixed the memory leak. I have not performed any comprehensive tests, so use at your own risk.
import java.util

import io.confluent.kafka.schemaregistry.client.rest.entities.{ Config, SchemaString }
import io.confluent.kafka.schemaregistry.client.rest.entities.requests.ConfigUpdateRequest
import io.confluent.kafka.schemaregistry.client.rest.{ RestService, entities }
import io.confluent.kafka.schemaregistry.client.{ SchemaMetadata, SchemaRegistryClient }
import org.apache.avro.Schema

import scala.collection.mutable

class CachingSchemaRegistryClient(val restService: RestService, val identityMapCapacity: Int)
    extends SchemaRegistryClient {

  val schemaCache: mutable.Map[String, mutable.Map[Schema, Integer]] = mutable.Map()
  val idCache: mutable.Map[String, mutable.Map[Integer, Schema]] =
    mutable.Map(null.asInstanceOf[String] -> mutable.Map())
  val versionCache: mutable.Map[String, mutable.Map[Schema, Integer]] = mutable.Map()

  def this(baseUrl: String, identityMapCapacity: Int) {
    this(new RestService(baseUrl), identityMapCapacity)
  }

  def this(baseUrls: util.List[String], identityMapCapacity: Int) {
    this(new RestService(baseUrls), identityMapCapacity)
  }

  def registerAndGetId(subject: String, schema: Schema): Int =
    restService.registerSchema(schema.toString, subject)

  def getSchemaByIdFromRegistry(id: Int): Schema = {
    val restSchema: SchemaString = restService.getId(id)
    (new Schema.Parser).parse(restSchema.getSchemaString)
  }

  def getVersionFromRegistry(subject: String, schema: Schema): Int = {
    val response: entities.Schema = restService.lookUpSubjectVersion(schema.toString, subject)
    response.getVersion.intValue
  }

  override def getVersion(subject: String, schema: Schema): Int = synchronized {
    val schemaVersionMap: mutable.Map[Schema, Integer] =
      versionCache.getOrElseUpdate(subject, mutable.Map())

    val version: Integer = schemaVersionMap.getOrElse(
      schema, {
        if (schemaVersionMap.size >= identityMapCapacity) {
          throw new IllegalStateException(s"Too many schema objects created for $subject!")
        }
        val version = new Integer(getVersionFromRegistry(subject, schema))
        schemaVersionMap.put(schema, version)
        version
      }
    )
    version.intValue()
  }

  override def getAllSubjects: util.List[String] = restService.getAllSubjects()

  override def getByID(id: Int): Schema = synchronized { getBySubjectAndID(null, id) }

  override def getBySubjectAndID(subject: String, id: Int): Schema = synchronized {
    val idSchemaMap: mutable.Map[Integer, Schema] = idCache.getOrElseUpdate(subject, mutable.Map())
    idSchemaMap.getOrElseUpdate(id, getSchemaByIdFromRegistry(id))
  }

  override def getSchemaMetadata(subject: String, version: Int): SchemaMetadata = {
    val response = restService.getVersion(subject, version)
    val id = response.getId.intValue
    val schema = response.getSchema
    new SchemaMetadata(id, version, schema)
  }

  override def getLatestSchemaMetadata(subject: String): SchemaMetadata = synchronized {
    val response = restService.getLatestVersion(subject)
    val id = response.getId.intValue
    val version = response.getVersion.intValue
    val schema = response.getSchema
    new SchemaMetadata(id, version, schema)
  }

  override def updateCompatibility(subject: String, compatibility: String): String = {
    val response: ConfigUpdateRequest = restService.updateCompatibility(compatibility, subject)
    response.getCompatibilityLevel
  }

  override def getCompatibility(subject: String): String = {
    val response: Config = restService.getConfig(subject)
    response.getCompatibilityLevel
  }

  override def testCompatibility(subject: String, schema: Schema): Boolean =
    restService.testCompatibility(schema.toString(), subject, "latest")

  override def register(subject: String, schema: Schema): Int = synchronized {
    val schemaIdMap: mutable.Map[Schema, Integer] =
      schemaCache.getOrElseUpdate(subject, mutable.Map())

    val id = schemaIdMap.getOrElse(
      schema, {
        if (schemaIdMap.size >= identityMapCapacity)
          throw new IllegalStateException(s"Too many schema objects created for $subject!")
        val id: Integer = new Integer(registerAndGetId(subject, schema))
        schemaIdMap.put(schema, id)
        idCache(null).put(id, schema)
        id
      }
    )
    id.intValue()
  }
}
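To use it, instantiate it in place of the stock client when building the serializer. A sketch (settings.schemaRegistry is the registry URL from the question; KafkaAvroSerializer has a constructor accepting a SchemaRegistryClient):

import io.confluent.kafka.serializers.KafkaAvroSerializer

val registryClient = new CachingSchemaRegistryClient(settings.schemaRegistry, 1000)
val serializer = new KafkaAvroSerializer(registryClient)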
How does one access the parameters used to construct a Module from inside the Tester that is testing it?
In the test below I am passing the parameters explicitly both to the Module and to the Tester. I would prefer not to have to pass them to the Tester but instead extract them from the module that was also passed in.
Also, I am new to Scala/Chisel, so any tips on bad techniques I'm using would be appreciated :).
import Chisel._
import math.pow

class TestA(dataWidth: Int, arrayLength: Int) extends Module {
  val dataType = Bits(INPUT, width = dataWidth)
  val arrayType = Vec(gen = dataType, n = arrayLength)
  val io = new Bundle {
    val i_valid = Bool(INPUT)
    val i_data = dataType
    val i_array = arrayType
    val o_valid = Bool(OUTPUT)
    val o_data = dataType.flip
    val o_array = arrayType.flip
  }
  io.o_valid := io.i_valid
  io.o_data := io.i_data
  io.o_array := io.i_array
}
class TestATests(c: TestA, dataWidth: Int, arrayLength: Int) extends Tester(c) {
  val maxData = pow(2, dataWidth).toInt
  for (t <- 0 until 16) {
    val i_valid = rnd.nextInt(2)
    val i_data = rnd.nextInt(maxData)
    val i_array = List.fill(arrayLength)(rnd.nextInt(maxData))
    poke(c.io.i_valid, i_valid)
    poke(c.io.i_data, i_data)
    (c.io.i_array, i_array).zipped foreach {
      (element, value) => poke(element, value)
    }
    expect(c.io.o_valid, i_valid)
    expect(c.io.o_data, i_data)
    (c.io.o_array, i_array).zipped foreach {
      (element, value) => expect(element, value) // outputs should be checked, not poked
    }
    step(1)
  }
}
object TestAObject {
  def main(args: Array[String]): Unit = {
    val tutArgs = args.slice(0, args.length)
    val dataWidth = 5
    val arrayLength = 6
    chiselMainTest(tutArgs, () => Module(new TestA(dataWidth = dataWidth, arrayLength = arrayLength))) {
      c => new TestATests(c, dataWidth = dataWidth, arrayLength = arrayLength)
    }
  }
}
If you make the arguments dataWidth and arrayLength members of TestA you can just reference them. In Scala this can be accomplished by inserting val into the argument list:
class TestA(val dataWidth: Int, val arrayLength: Int) extends Module ...
Then you can reference them from the test as members, via c.dataWidth or c.arrayLength.
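With that, the Tester and the main object slim down; a sketch (test body unchanged from the question):

class TestATests(c: TestA) extends Tester(c) {
  // the parameters are read back from the module under test
  val maxData = pow(2, c.dataWidth).toInt
  for (t <- 0 until 16) {
    val i_array = List.fill(c.arrayLength)(rnd.nextInt(maxData))
    // ... same pokes and expects as before ...
  }
}

// tutArgs as in the question's main; the parameters are no longer passed twice
chiselMainTest(tutArgs, () => Module(new TestA(dataWidth = 5, arrayLength = 6))) {
  c => new TestATests(c)
}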
Is there a way to have Slick's code generation generate code for only a single schema? Say, public? I have extensions that create a whole ton of tables (eg postgis, pg_jobman) that make the code that slick generates gigantic.
Use this code with appropriate values and schema names:
import java.io.{ File, FileWriter }

import scala.reflect.runtime.currentMirror
import scala.slick.driver.{ JdbcProfile, PostgresDriver }
import scala.slick.jdbc.meta.MTable
import scala.slick.model.Model
import scala.slick.model.codegen.SourceCodeGenerator

object CodeGenerator {
  def outputDir: String = ""
  def pkg: String = ""
  def schemaList: String = "schema1, schema2"
  def url: String = "dburl"
  def fileName: String = ""

  val user = "dbUsername"
  val password = "dbPassword"
  val slickDriver = "scala.slick.driver.PostgresDriver"
  val JdbcDriver = "org.postgresql.Driver"
  val container = "Tables"

  def generate() = {
    val driver: JdbcProfile = buildJdbcProfile
    val schemas = createSchemaList
    val model = createModel(driver, schemas)
    val codegen = new SourceCodeGenerator(model) {
      // customize Scala table name (table class, table values, ...)
      override def tableName = dbTableName => dbTableName + "Table"

      override def code = {
        // imports is copied right out of
        // scala.slick.model.codegen.AbstractSourceCodeGenerator
        val imports =
          "import scala.slick.model.ForeignKeyAction\n" +
            (if (tables.exists(_.hlistEnabled)) {
              "import scala.slick.collection.heterogenous._\n" +
                "import scala.slick.collection.heterogenous.syntax._\n"
            } else "") +
            (if (tables.exists(_.PlainSqlMapper.enabled)) {
              "import scala.slick.jdbc.{GetResult => GR}\n" +
                "// NOTE: GetResult mappers for plain SQL are only generated for tables where Slick knows how to map the types of all columns.\n"
            } else "") + "\n\n"

        val bySchema = tables.groupBy(t => t.model.name.schema)

        val schemaFor = (schema: Option[String]) => {
          bySchema(schema).sortBy(_.model.name.table).map(
            _.code.mkString("\n")
          ).mkString("\n\n")
        }

        val joins = tables.flatMap(_.foreignKeys.map { foreignKey =>
          import foreignKey._
          val fkt = referencingTable.TableClass.name
          val pkt = referencedTable.TableClass.name
          val columns = referencingColumns.map(_.name) zip referencedColumns.map(_.name)
          s"implicit def autojoin${fkt + name.toString} = (left:${fkt} ,right:${pkt}) => " +
            columns.map {
              case (lcol, rcol) => "left." + lcol + " === " + "right." + rcol
            }.mkString(" && ")
        })

        // assemble the generated source: imports, per-schema table code, join helpers
        imports + bySchema.keys.toSeq.sortBy(_.getOrElse("")).map(schemaFor).mkString("\n\n") +
          "\n\n" + joins.mkString("\n")
      }

      override def entityName = dbTableName => dbTableName

      override def Table = new Table(_) {
        table =>
        // customize table value (TableQuery) name (uses tableName as a basis)
        override def TableValue = new TableValue {
          override def rawName = super.rawName.uncapitalize
        }
        // override generator responsible for columns
        override def Column = new Column(_) {
          // customize Scala column names
          override def rawName = super.rawName
        }
      }
    }

    println(outputDir + File.separator + fileName)
    (new File(outputDir)).mkdirs()
    val fw = new FileWriter(outputDir + File.separator + fileName)
    fw.write(codegen.packageCode(slickDriver, pkg, container))
    fw.close()
  }

  def createModel(driver: JdbcProfile, schemas: Set[Option[String]]): Model = {
    driver.simple.Database
      .forURL(url, user = user, password = password, driver = JdbcDriver)
      .withSession { implicit session =>
        val filteredTables = driver.defaultTables.filter(
          (t: MTable) => schemas.contains(t.name.schema)
        )
        PostgresDriver.createModel(Some(filteredTables))
      }
  }

  def createSchemaList: Set[Option[String]] = {
    schemaList.split(",").map(_.trim).map({
      case "" => None
      case name => Some(name)
    }).toSet
  }

  def buildJdbcProfile: JdbcProfile = {
    val module = currentMirror.staticModule(slickDriver)
    val reflectedModule = currentMirror.reflectModule(module)
    reflectedModule.instance.asInstanceOf[JdbcProfile]
  }
}
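With the connection values filled in, generation is then a single call:

CodeGenerator.generate()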
I encountered the same problem and found this question. The answer by S.Karthik sent me in the right direction. However, the code in the answer is slightly outdated, and I think a bit over-complicated, so I crafted my own solution:
import slick.codegen.SourceCodeGenerator
import slick.driver.JdbcProfile
import slick.model.Model

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext}

val slickDriver = "slick.driver.PostgresDriver"
val jdbcDriver = "org.postgresql.Driver"
val url = "jdbc:postgresql://localhost:5432/mydb"
val outputFolder = "/path/to/src/test/scala"
val pkg = "com.mycompany"
val user = "user"
val password = "password"

object MySourceCodeGenerator {
  def run(slickDriver: String, jdbcDriver: String, url: String, outputDir: String,
          pkg: String, user: Option[String], password: Option[String]): Unit = {
    val driver: JdbcProfile =
      Class.forName(slickDriver + "$").getField("MODULE$").get(null).asInstanceOf[JdbcProfile]
    val dbFactory = driver.api.Database
    val db = dbFactory.forURL(url, driver = jdbcDriver, user = user.orNull,
      password = password.orNull, keepAliveConnection = true)
    try {
      // **1**
      val allSchemas = Await.result(
        db.run(driver.createModel(None, ignoreInvalidDefaults = false)(ExecutionContext.global).withPinnedSession),
        Duration.Inf)
      // **2**
      val publicSchema = new Model(allSchemas.tables.filter(_.name.schema.isEmpty), allSchemas.options)
      // **3**
      new SourceCodeGenerator(publicSchema).writeToFile(slickDriver, outputDir, pkg)
    } finally db.close
  }
}

MySourceCodeGenerator.run(slickDriver, jdbcDriver, url, outputFolder, pkg, Some(user), Some(password))
I'll explain what's going on here:
I copied the run function from the SourceCodeGenerator class that's in the slick-codegen library. (I used version slick-codegen_2.10-3.1.1.)
// **1**: In the original code, the generated Model was referenced in a val called m. I renamed that to allSchemas.
// **2**: I created a new Model (publicSchema), using the options from the original model and a filtered version of its tables set. It turns out tables from the public schema don't get a schema name in the model, hence the isEmpty. Should you need tables from one or more other schemas, you can easily create a different filter expression, as sketched below.
// **3**: I create a SourceCodeGenerator with the created publicSchema model.
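For example, a minimal sketch that also keeps a hypothetical extra schema (the "audit" name is made up for illustration):

// keep the public schema (no schema name in the model) plus a hypothetical "audit" schema
val wantedSchemas: Set[Option[String]] = Set(None, Some("audit"))
val selectedSchemas = new Model(
  allSchemas.tables.filter(t => wantedSchemas.contains(t.name.schema)),
  allSchemas.options)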
Of course, it would be even better if the Slick code generator incorporated an option to select one or more schemas.
I am using Play framework 2.1.1 with Scala. I query a database table, return the result to the controller as a list, convert the list to a string, and return that to an ajax call from the JavaScript code.
How can I return the query result as JSON to the ajax call through the controller?
Application.scala
import play.api._
import play.api.mvc._
import play.api.data._
import views.html._
import models._

object Application extends Controller {

  def index = Action {
    Ok(views.html.index())
  }

  def getSummaryTable = Action {
    var sum = "Summary Table"
    Ok(ajax_result.render((Timesheet.getAll).mkString("\n")))
  }

  def javascriptRoutes = Action { implicit request =>
    import routes.javascript._
    Ok(
      Routes.javascriptRouter("jsRoutes")(
        // Routes
        controllers.routes.javascript.Application.getSummaryTable
      )
    ).as("text/javascript")
  }
}
TimeSheet.scala
// Use PostgresDriver to connect to a Postgres database
import scala.slick.driver.PostgresDriver.simple._
import scala.slick.lifted.{MappedTypeMapper, BaseTypeMapper, TypeMapperDelegate}
import scala.slick.driver.BasicProfile
import scala.slick.session.{PositionedParameters, PositionedResult}

// Use the implicit threadLocalSession
import Database.threadLocalSession

import java.sql.Date
import java.sql.Time

case class Timesheet(ID: Int, dateVal: String, entryTime: Time, exitTime: Time, someVal: String)

object Timesheet {
  // Definition of the timesheet table
  // object TS extends Table[(Int,String,Time,Time,String)]("timesheet"){
  val TSTable = new Table[Timesheet]("timesheet") {
    def ID = column[Int]("id")
    def dateVal = column[String]("date")
    def entryTime = column[Time]("entry_time")
    def exitTime = column[Time]("exit_time")
    def someVal = column[String]("someval")
    def * = ID ~ dateVal ~ entryTime ~ exitTime ~ someVal <> (Timesheet.apply _, Timesheet.unapply _)
  }

  def getAll: Seq[Timesheet] = {
    Database.forURL("jdbc:postgresql://localhost:5432/my_db", "postgres", "password",
      null, driver = "org.postgresql.Driver") withSession {
      val q = Query(TSTable)
      val qFiltered = q.filter(_.ID === 41)
      val qDateFilter = qFiltered.filter(_.dateVal === "01/03/2013")
      val qSorted = qDateFilter.sortBy(_.entryTime)
      qSorted.list
    }
  }
}
Also, don't forget to provide an implicit (or explicit) JSON serializer for your model - a Writes[Timesheet] - otherwise the Scala compiler will yell at you :-). You can do something like:
import play.api.libs.json.Json

def allTimesheet = Action {
  val timesheetWrites = Json.writes[Timesheet] // here it's the serializer (a Writes[Timesheet])
  val listofTimeSheet = Timesheet.getAll
  Ok(Json.toJson(listofTimeSheet)(timesheetWrites))
}
or you can use implicits like :
def allTimesheet = Action {
  implicit val timesheetWrites = Json.writes[Timesheet] // implicit serializer, picked up by Json.toJson
  val listofTimeSheet = Timesheet.getAll
  Ok(Json.toJson(listofTimeSheet))
}
and even declare your serializer in your model's companion object:

object Timesheet {
  implicit val timesheetWrites = Json.writes[Timesheet] // available wherever the companion is in scope
  // ...
}
and in the controller
import models.Timesheet.timesheetWrites

def allTimesheet = Action {
  val listofTimeSheet = Timesheet.getAll
  Ok(Json.toJson(listofTimeSheet))
}
I recommend you use play.api.libs.Json.toJson.
Here's an example:
object Products extends Controller {
  def list = Action {
    val productCodes = Product.findAll.map(_.ean)
    Ok(Json.toJson(productCodes))
  }
}
Json.toJson returns a JsValue, for which Play will automatically set an application/json Content-Type header.
See Play For Scala chapter 8.