I'm trying to run the DeepCrack model, a CNN for pavement crack detection, but I'm getting the error below. I understand that the problem is in outputs = model(images), because my model returns a tuple there instead of a tensor. I tried to convert the output to a tensor, but it did not work. How can I solve this? My full code is included here; please help me get rid of this error.
#!/usr/bin/env python
# coding: utf-8

# In[ ]:

# License: BSD
# Author: Sasank Chilamkurthy

from __future__ import print_function, division

import copy
import os
import time
from pathlib import Path
from random import *
from time import sleep
from warnings import filterwarnings

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torch.optim import lr_scheduler
from torch.utils.data import random_split
from torchvision import datasets, models, transforms
from torchvision.datasets import ImageFolder
from tqdm.notebook import tqdm, trange

cudnn.benchmark = True
plt.ion()   # interactive mode
filterwarnings('ignore')
# function to show (and save) a grid of images
def imshow(img):
    img = img / 2 + 0.5     # unnormalize
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.savefig('labels.JPG')   # save before show so the saved figure is not blank
    plt.show()
## data augmentation for training
train_trans = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.RandomHorizontalFlip(p=0.5),       ## tamim: image will move left and right
    transforms.RandomVerticalFlip(p=0.5),         ## tamim: image will come to eye vertically
    transforms.RandomRotation(degrees=(.5, 5)),   ## very small rotation of the cracks
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.5, 0.5, 0.5],
        std=[0.5, 0.5, 0.5]
    )
])

## test data should only be resized and normalized, not randomly augmented
test_trans = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.5, 0.5, 0.5],
        std=[0.5, 0.5, 0.5]
    )
])
## Load data
data = ImageFolder('../Data/Data_Structure(Annotated)', transform=train_trans)
test_folder = ImageFolder("../Data/DATA_iPhone_13_Pro_Max", transform=test_trans)

batch_size = 32
num_classes = 4
learning_rate = 0.01
num_epochs = 10

print("Following classes are there : \n", data.classes)
classes = ('Alligator Cracks', 'Delamination', 'Longitudinal Cracks', 'Transverse Cracks')
print(len(data))
## Splitting data and preparing batches
## Source: https://medium.com/thecyphy/train-cnn-model-with-pytorch-21dafb918f48
val_size = 127                      ## Tamim: hold out 127 images (~30%) for validation
train_size = len(data) - val_size

## random_split() randomly divides the dataset into train and validation subsets
train_data, val_data = random_split(data, [train_size, val_size])
print(f"Length of Train Data : {len(train_data)}")
print(f"Length of Validation Data : {len(val_data)}")

# Wrapping the train and validation subsets in batched loaders
train_loader = torch.utils.data.DataLoader(train_data, shuffle=True, batch_size=batch_size)
val_loader = torch.utils.data.DataLoader(val_data, shuffle=True, batch_size=batch_size)
test_loader = torch.utils.data.DataLoader(test_folder, shuffle=False, batch_size=batch_size)
# visualize images of a single batch
dataiter = iter(train_loader)
images, labels = next(dataiter)
# show images
imshow(torchvision.utils.make_grid(images))
# print labels
print(' '.join(f'{classes[labels[j]]:5s}' for j in range(batch_size)))
# print(model)

# DeepCrack model definition
def Conv3X3(in_, out):
    return torch.nn.Conv2d(in_, out, 3, padding=1)


class ConvRelu(nn.Module):
    def __init__(self, in_, out):
        super().__init__()
        self.conv = Conv3X3(in_, out)
        self.activation = torch.nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.conv(x)
        x = self.activation(x)
        return x


class Down(nn.Module):
    def __init__(self, net):   # renamed from `nn` to avoid shadowing the torch.nn module
        super(Down, self).__init__()
        self.nn = net
        self.maxpool_with_argmax = torch.nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)

    def forward(self, inputs):
        down = self.nn(inputs)
        unpooled_shape = down.size()
        outputs, indices = self.maxpool_with_argmax(down)
        return outputs, down, indices, unpooled_shape


class Up(nn.Module):
    def __init__(self, net):
        super().__init__()
        self.nn = net
        self.unpool = torch.nn.MaxUnpool2d(2, 2)

    def forward(self, inputs, indices, output_shape):
        outputs = self.unpool(inputs, indices=indices, output_size=output_shape)
        outputs = self.nn(outputs)
        return outputs


class Fuse(nn.Module):
    def __init__(self, net, scale):
        super().__init__()
        self.nn = net
        self.scale = scale
        self.conv = Conv3X3(64, 1)

    def forward(self, down_inp, up_inp):
        outputs = torch.cat([down_inp, up_inp], 1)
        outputs = F.interpolate(outputs, scale_factor=self.scale, mode='bilinear')
        outputs = self.nn(outputs)
        return self.conv(outputs)
class DeepCrack(nn.Module):
    def __init__(self, num_classes=1000):
        super(DeepCrack, self).__init__()
        self.down1 = Down(torch.nn.Sequential(
            ConvRelu(3, 64),
            ConvRelu(64, 64),
        ))
        self.down2 = Down(torch.nn.Sequential(
            ConvRelu(64, 128),
            ConvRelu(128, 128),
        ))
        self.down3 = Down(torch.nn.Sequential(
            ConvRelu(128, 256),
            ConvRelu(256, 256),
            ConvRelu(256, 256),
        ))
        self.down4 = Down(torch.nn.Sequential(
            ConvRelu(256, 512),
            ConvRelu(512, 512),
            ConvRelu(512, 512),
        ))
        self.down5 = Down(torch.nn.Sequential(
            ConvRelu(512, 512),
            ConvRelu(512, 512),
            ConvRelu(512, 512),
        ))
        self.up1 = Up(torch.nn.Sequential(
            ConvRelu(64, 64),
            ConvRelu(64, 64),
        ))
        self.up2 = Up(torch.nn.Sequential(
            ConvRelu(128, 128),
            ConvRelu(128, 64),
        ))
        self.up3 = Up(torch.nn.Sequential(
            ConvRelu(256, 256),
            ConvRelu(256, 256),
            ConvRelu(256, 128),
        ))
        self.up4 = Up(torch.nn.Sequential(
            ConvRelu(512, 512),
            ConvRelu(512, 512),
            ConvRelu(512, 256),
        ))
        self.up5 = Up(torch.nn.Sequential(
            ConvRelu(512, 512),
            ConvRelu(512, 512),
            ConvRelu(512, 512),
        ))
        self.fuse5 = Fuse(ConvRelu(512 + 512, 64), scale=16)
        self.fuse4 = Fuse(ConvRelu(512 + 256, 64), scale=8)
        self.fuse3 = Fuse(ConvRelu(256 + 128, 64), scale=4)
        self.fuse2 = Fuse(ConvRelu(128 + 64, 64), scale=2)
        self.fuse1 = Fuse(ConvRelu(64 + 64, 64), scale=1)
        self.final = Conv3X3(5, 1)

    def forward(self, inputs):
        # encoder part
        out, down1, indices_1, unpool_shape1 = self.down1(inputs)
        out, down2, indices_2, unpool_shape2 = self.down2(out)
        out, down3, indices_3, unpool_shape3 = self.down3(out)
        out, down4, indices_4, unpool_shape4 = self.down4(out)
        out, down5, indices_5, unpool_shape5 = self.down5(out)
        # decoder part
        up5 = self.up5(out, indices=indices_5, output_shape=unpool_shape5)
        up4 = self.up4(up5, indices=indices_4, output_shape=unpool_shape4)
        up3 = self.up3(up4, indices=indices_3, output_shape=unpool_shape3)
        up2 = self.up2(up3, indices=indices_2, output_shape=unpool_shape2)
        up1 = self.up1(up2, indices=indices_1, output_shape=unpool_shape1)
        fuse5 = self.fuse5(down_inp=down5, up_inp=up5)
        fuse4 = self.fuse4(down_inp=down4, up_inp=up4)
        fuse3 = self.fuse3(down_inp=down3, up_inp=up3)
        fuse2 = self.fuse2(down_inp=down2, up_inp=up2)
        fuse1 = self.fuse1(down_inp=down1, up_inp=up1)
        output = self.final(torch.cat([fuse5, fuse4, fuse3, fuse2, fuse1], 1))
        # note: forward returns a 6-tuple, not a single tensor
        return output, fuse5, fuse4, fuse3, fuse2, fuse1
if __name__ == '__main__':
    inp = torch.randn((1, 3, 512, 512))
    model = DeepCrack()
    out = model(inp)

model = DeepCrack()
print(model)
# specify loss function
criterion = nn.CrossEntropyLoss()

# specify optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# number of epochs to train the model
n_epochs = 10

for epoch in range(1, n_epochs + 1):
    # monitor training loss
    train_loss = 0.0

    ###################
    # train the model #
    ###################
    for data in train_loader:
        # _ stands in for labels, here
        # no need to flatten images
        images, _ = data
        # clear the gradients of all optimized variables
        optimizer.zero_grad()
        # forward pass: compute predicted outputs by passing inputs to the model
        outputs = model(images)
        # calculate the loss (this is the line that raises the error below,
        # because outputs is a tuple)
        loss = criterion(outputs, images)
        # backward pass: compute gradient of the loss with respect to model parameters
        loss.backward()
        # perform a single optimization step (parameter update)
        optimizer.step()
        # update running training loss
        train_loss += loss.item() * images.size(0)

    # print avg training statistics (per sample, since the running loss
    # was accumulated per sample above)
    train_loss = train_loss / len(train_loader.dataset)
    print('Epoch: {} \tTraining Loss: {:.6f}'.format(
        epoch,
        train_loss
    ))
Traceback (most recent call last):
  File "test_deepcrack.py", line 320, in <module>
    loss = criterion(outputs, images)
  File "/apps/pkg/pytorch/1.10.2/cuda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/apps/pkg/pytorch/1.10.2/cuda/lib/python3.8/site-packages/torch/nn/modules/loss.py", line 1150, in forward
    return F.cross_entropy(input, target, weight=self.weight,
  File "/apps/pkg/pytorch/1.10.2/cuda/lib/python3.8/site-packages/torch/nn/functional.py", line 2846, in cross_entropy
    return torch._C._nn.cross_entropy_loss(input, target, weight,
        _Reduction.get_enum(reduction), ignore_index, label_smoothing)
TypeError: cross_entropy_loss(): argument 'input' (position 1) must be Tensor, not tuple
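For context, the error just means that model(images) returns the 6-tuple from DeepCrack's forward() (the fused map plus the five side outputs), and the loss receives that tuple instead of a tensor. A minimal sketch of unpacking it inside the loop follows; the loss and target here are my assumptions, since DeepCrack is a segmentation network normally trained with a pixel-wise binary loss against crack masks, not against the input images:

# sketch only: 'masks' assumes a dataset that yields (image, mask) pairs,
# which ImageFolder does not provide
criterion = nn.BCEWithLogitsLoss()
for images, masks in train_loader:
    optimizer.zero_grad()
    # unpack the tuple; the first element is the fused prediction
    output, fuse5, fuse4, fuse3, fuse2, fuse1 = model(images)
    loss = criterion(output, masks)
    loss.backward()
    optimizer.step()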
from machine import Pin,SPI,PWM
import framebuf
import time
import os
import random
BL = 13
DC = 8
RST = 12
MOSI = 11
SCK = 10
CS = 9
class LCD_1inch3(framebuf.FrameBuffer):   # For 320x240 display
    def __init__(self):
        self.width = 320
        self.height = 240

        self.cs = Pin(CS, Pin.OUT)
        self.rst = Pin(RST, Pin.OUT)

        self.cs(1)
        # only the last SPI constructor call takes effect
        self.spi = SPI(1)
        self.spi = SPI(1, 1000_000)
        self.spi = SPI(1, 100000_000, polarity=0, phase=0, sck=Pin(SCK), mosi=Pin(MOSI), miso=None)
        self.dc = Pin(DC, Pin.OUT)
        self.dc(1)
        self.buffer = bytearray(self.height * self.width * 2)
        super().__init__(self.buffer, self.width, self.height, framebuf.RGB565)
        self.init_display()

        self.RED    = 0x07E0
        self.GREEN  = 0x001f
        self.BLUE   = 0xf800
        self.WHITE  = 0xffff
        self.BLACK  = 0x0000
        self.YELLOW = 0x07FF

    def eyes2(self):
        self.ellipse(10, 10, 10, 10, self.BLACK)
I keep getting "AttributeError: 'LCD_1inch3' object has no attribute 'poly'" and idk y.
I can use the other ones in the framebuffer imported library like vline(), hline(), line(), rect(), fill() and text() but thats around it. Ive been trying to be able to use poly() and ellipse() but no success
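One likely explanation, hedged: FrameBuffer.ellipse() and FrameBuffer.poly() were only added to MicroPython around v1.19, so on older firmware they simply don't exist and calling them raises exactly this AttributeError, while the long-standing methods (vline(), hline(), line(), rect(), fill(), text()) keep working. A quick sketch to check the running firmware, plus a crude pixel() fallback for ellipses (the helper name and step count are mine):

import os
print(os.uname().release)   # firmware version, e.g. '1.17' vs '1.19.1'

import math

def draw_ellipse(fb, cx, cy, rx, ry, color, steps=120):
    # approximate the outline with short parametric steps using pixel(),
    # which exists in every firmware version
    for i in range(steps):
        t = 2 * math.pi * i / steps
        fb.pixel(int(cx + rx * math.cos(t)), int(cy + ry * math.sin(t)), color)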
For an open source pokerbot I'm trying to recognize images as implemented here. I have tried the following with an example image that I'd like tesseract to recognize:
pytesseract.image_to_string(img_orig)
Out[32]: 'cies TE'
pytesseract.image_to_string(img_mod, 'eng', config='--psm 6 --oem 1 -c tessedit_char_whitelist=0123456789.$£B')
Out[33]: ''
So then let's use some more sophisticated methods by scaling:
basewidth = 200
wpercent = (basewidth / float(img_orig.size[0]))
hsize = int((float(img_orig.size[1]) * float(wpercent)))
img_resized = img_orig.convert('L').resize((basewidth, hsize), Image.ANTIALIAS)
if binarize:
    img_resized = binarize_array(img_resized, 200)
Now we end up with a scaled, grayscale image (not reproduced here). Let's see what comes out:
pytesseract.image_to_string(img_resized)
Out[34]: 'Stee'
pytesseract.image_to_string(img_resized, 'eng', config='--psm 6 --oem 1 -c tessedit_char_whitelist=0123456789.$£B')
Out[35]: ''
Ok, that didn't work. Let's try applying some filters:
img_min = img_resized.filter(ImageFilter.MinFilter)
img_mod = img_resized.filter(ImageFilter.ModeFilter)
img_med = img_resized.filter(ImageFilter.MedianFilter)
img_sharp = img_resized.filter(ImageFilter.SHARPEN)
pytesseract.image_to_string(img_min)
Out[36]: ''
pytesseract.image_to_string(img_mod)
Out[37]: 'oe Se'
pytesseract.image_to_string(img_med)
Out[38]: 'rete'
pytesseract.image_to_string(img_sharp)
Out[39]: 'ry'
Or maybe binarize will help?
numpy_array = np.array(image)
for i in range(len(numpy_array)):
    for j in range(len(numpy_array[0])):
        if numpy_array[i][j] > threshold:
            numpy_array[i][j] = 255
        else:
            numpy_array[i][j] = 0
img_binarized = Image.fromarray(numpy_array)
pytesseract.image_to_string(img_binarized)
Out[42]: 'Sion'
pytesseract.image_to_string(img_binarized, 'eng', config='--psm 6 --oem 1 -c tessedit_char_whitelist=0123456789.$£B')
Out[44]: '0'
Again, all totally wrong.
What else can I do?
Any suggestions would be greatly appreciated.
Add-on example:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
threshold_img = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
im_pil = cv2_to_pil(threshold_img)
pytesseract.image_to_string(im_pil)
Out[5]: 'TUM'
Or trying another suggested algorithm:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
threshold_img = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
im_pil = cv2_to_pil(threshold_img)
pytesseract.image_to_string(im_pil, 'eng', config='--psm 7')
Out[5]: '$1.99'
I think you are making it too complicated here. I did simple Otsu thresholding on the image that you've provided and was able to get the output.
import cv2
import pytesseract
from PIL import Image

image_path = r'path/to/image'
img = cv2.imread(image_path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
cv2.imwrite('thresh.png', thresh)
detected_text = pytesseract.image_to_string(Image.open('thresh.png'))   # OCR the thresholded image
print(detected_text)
The image that I got after thresholding (not reproduced here) was easily read by Tesseract, and the output I got: $0.51
You can do it with or without Otsu. The trick for me was to invert the image so that it's black text on white background (which Tesseract seems to prefer).
EDIT One more trick for Tesseract is to add a border around the image; Tesseract does not like the text to be too close to the edge.
import cv2
import pytesseract
import numpy as np
img = cv2.imread('one_twenty_nine.png', cv2.IMREAD_GRAYSCALE)
thresh = cv2.threshold(img, 100, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)[1]
cv2.imwrite('thresh.png', thresh)
detected_text = pytesseract.image_to_string(thresh, config='--psm 7')
print(detected_text)
which gives
$1.29
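The border trick from the EDIT above isn't shown in the snippet, so here is a hedged sketch of one way to do it (the 20px padding is my choice): cv2.copyMakeBorder adds a white margin around the black-on-white thresholded image before handing it to Tesseract.

# pad with a constant white border so the text is not flush against the edge
padded = cv2.copyMakeBorder(thresh, 20, 20, 20, 20, cv2.BORDER_CONSTANT, value=255)
detected_text = pytesseract.image_to_string(padded, config='--psm 7')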
I want to make a linear regression program which visualizes the data to the user. I'm using EJML for the calculations and ScalaFX for the front end. Everything is going fine, but when I plot the data using a ScatterChart, the points drawn for the regression line are large rectangles which cover up the original data points. I would like to know how I can change the size, shape, transparency, etc. of the plotted points.
Almost all guides on JavaFX say that I should modify the CSS file (which doesn't exist by default) in order to style my chart. I don't know how to do that in ScalaFX, or whether it is even possible that way. My search through every tutorial I could find has been fruitless.
import scalafx.application.JFXApp
import scalafx.scene.Scene
import scalafx.scene.chart.ScatterChart
import scalafx.collections.ObservableBuffer
import scalafx.scene.chart.NumberAxis
import scalafx.scene.chart.XYChart
import scalafx.scene.shape.Line
import org.ejml.simple.SimpleMatrix
import scala.math.pow
import scala.collection.mutable.Buffer

object Plotting extends JFXApp {

  /*
   * Below are some arbitrary x and y values for a regression line
   */
  val xValues = Array(Array(1.0, 1.0, 1.0, 1.0, 1.0, 1.0), Array(14.0, 19.0, 22.0, 26.0, 31.0, 43.0))
  val yValues = Array(Array(51.0, 57.0, 66.0, 71.0, 72.0, 84.0))
  val temp = yValues.flatten
  val wrapper = xValues(1).zip(temp)

  /*
   * In the lines before stage what happens is that matrices for the x and y values are created, coefficients
   * for the regression line are calculated with matrix operations and (x, y) points are calculated for the
   * regression line.
   */
  val X = new SimpleMatrix(xValues).transpose
  val Y = new SimpleMatrix(yValues).transpose
  val secondX = new SimpleMatrix(xValues(0).size, 2)
  for (i <- 0 until xValues(0).size) {
    secondX.set(i, 0, xValues(0)(i))
    secondX.set(i, 1, xValues(1)(i))
  }
  val invertedSecondX = secondX.pseudoInverse()
  val B = invertedSecondX.mult(Y)
  val graphPoints = Buffer[(Double, Double)]()
  for (i <- 0 to xValues(1).max.toInt) {
    graphPoints.append((i.toDouble, B.get(0, 0) + i * B.get(1, 0)))
  }

  stage = new JFXApp.PrimaryStage {
    title = "Demo"
    scene = new Scene(400, 400) {
      val xAxis = NumberAxis()
      val yAxis = NumberAxis()
      val pData = XYChart.Series[Number, Number](
        "Data",
        ObservableBuffer(wrapper.map(z => XYChart.Data[Number, Number](z._1, z._2)): _*))
      val graph = XYChart.Series[Number, Number](
        "RegressionLine",
        ObservableBuffer(graphPoints.map(z => XYChart.Data[Number, Number](z._1, z._2)): _*))
      val plot = new ScatterChart(xAxis, yAxis, ObservableBuffer(graph, pData))
      root = plot
    }
  }
}
This certainly isn't as well documented as it might be... :-(
Stylesheets are typically placed in your project's resource directory. If you're using SBT (recommended), this would be src/main/resources.
In this example, I've added a stylesheet called MyCharts.css to this directory with the following contents:
/* Blue semi-transparent 4-pointed star, using SVG path. */
.default-color0.chart-symbol {
    -fx-background-color: blue;
    -fx-scale-shape: true;
    -fx-shape: "M 0.0 10.0 L 3.0 3.0 L 10.0 0.0 L 3.0 -3.0 L 0.0 -10.0 L -3.0 -3.0 L -10.0 0.0 L -3.0 3.0 Z ";
    -fx-opacity: 0.5;
}

/* Default shape is a rectangle. Here, we round it to become a red circle with a white
 * center. Change the radius to control the size.
 */
.default-color1.chart-symbol {
    -fx-background-color: red, white;
    -fx-background-insets: 0, 2;
    -fx-background-radius: 3px;
    -fx-padding: 3px;
}
color0 will be used for the first data series (the regression line), color1 for the second (your scatter data). All other series use the default JavaFX style.
(For more information on using scalable vector graphics (SVG) paths to define custom shapes, refer to the relevant section of the SVG specification.)
To have this stylesheet used by ScalaFX (JavaFX), you have a choice of options. To have them apply globally, add it to the main scene (which is what I've done below). Alternatively, if each chart needs a different style, you can add different stylesheets to specific charts. (BTW, I also added the standard includes import as this prevents many JavaFX-ScalaFX element conversion issues; otherwise, I've made no changes to your sources.)
import scalafx.Includes._
import scalafx.application.JFXApp
import scalafx.scene.Scene
import scalafx.scene.chart.ScatterChart
import scalafx.collections.ObservableBuffer
import scalafx.scene.chart.NumberAxis
import scalafx.scene.chart.XYChart
import scalafx.scene.shape.Line
import org.ejml.simple.SimpleMatrix
import scala.math.pow
import scala.collection.mutable.Buffer

object Plotting extends JFXApp {

  /*
   * Below are some arbitrary x and y values for a regression line
   */
  val xValues = Array(Array(1.0, 1.0, 1.0, 1.0, 1.0, 1.0), Array(14.0, 19.0, 22.0, 26.0, 31.0, 43.0))
  val yValues = Array(Array(51.0, 57.0, 66.0, 71.0, 72.0, 84.0))
  val temp = yValues.flatten
  val wrapper = xValues(1).zip(temp)

  /*
   * In the lines before stage what happens is that matrices for the x and y values are created, coefficients
   * for the regression line are calculated with matrix operations and (x, y) points are calculated for the
   * regression line.
   */
  val X = new SimpleMatrix(xValues).transpose
  val Y = new SimpleMatrix(yValues).transpose
  val secondX = new SimpleMatrix(xValues(0).size, 2)
  for (i <- 0 until xValues(0).size) {
    secondX.set(i, 0, xValues(0)(i))
    secondX.set(i, 1, xValues(1)(i))
  }
  val invertedSecondX = secondX.pseudoInverse()
  val B = invertedSecondX.mult(Y)
  val graphPoints = Buffer[(Double, Double)]()
  for (i <- 0 to xValues(1).max.toInt) {
    graphPoints.append((i.toDouble, B.get(0, 0) + i * B.get(1, 0)))
  }

  stage = new JFXApp.PrimaryStage {
    title = "Demo"
    scene = new Scene(400, 400) {

      // Add our stylesheet.
      stylesheets.add("MyCharts.css")

      val xAxis = NumberAxis()
      val yAxis = NumberAxis()
      val pData = XYChart.Series[Number, Number](
        "Data",
        ObservableBuffer(wrapper.map(z => XYChart.Data[Number, Number](z._1, z._2)): _*))
      val graph = XYChart.Series[Number, Number](
        "RegressionLine",
        ObservableBuffer(graphPoints.map(z => XYChart.Data[Number, Number](z._1, z._2)): _*))
      val plot = new ScatterChart(xAxis, yAxis, ObservableBuffer(graph, pData))
      root = plot
    }
  }
}
For further information on the CSS formatting options available (changing shapes, colors, transparency, etc.), refer to the JavaFX CSS Reference Guide.
The result (screenshot not reproduced here) shows the regression line drawn as blue semi-transparent stars and the data points as red circles with white centers.
I almost don't dare to add something to Mike Allen's solution (which is very good, as always), but this did not work out for me because I could not get my Scala setup to find and/or process the .css file.
I would have done it that way if possible, but I just could not get it to work.
Here is what I came up with:
Suppose I have some data to display:
val xyExampleData: ObservableBuffer[(Double, Double)] = ObservableBuffer(Seq(
  1 -> 1,
  2 -> 4,
  3 -> 9))
Then I convert this to a Series for the LineChart:
val DataPoints = ObservableBuffer(xyExampleData map { case (x, y) => XYChart.Data[Number, Number](x, y) })
val PointsToDisplay = XYChart.Series[Number, Number]("Points", DataPoints)
Now I put this into a Buffer again, perhaps together with data from other series.
val lineChartBuffer = ObservableBuffer(PointsToDisplay, ...)
and finally I create my LineChart, which I call (for lack of creativity) lineChart:
val lineChart = new LineChart(xAxis, yAxis, lineChartBuffer) {...}
The lines between data points can be recoloured now easily with:
lineChart.lookup(".default-color0.chart-series-line").setStyle("-fx-stroke: blue;")
This will change the line colour of the FIRST data set in lineChartBuffer.
If you want to change the line properties for the second, you call
lineChart.lookup(".default-color1.chart-series-line")...
There is also "-fx-stroke-width: 3px;" to set the width of the line.
"-fx-opacity: 0.1;"
"-fx-stroke-dash-array: 10;"
"-fx-fill: blue;"
are also useful, but don't call the above line repeatedly, because the second call will override the first.
Instead, concatenate all the strings into one:
lineChart.lookup(".default-color0.chart-series-line").setStyle("-fx-stroke: blue;-fx-opacity: 0.1;-fx-stroke-dash-array: 10;-fx-fill: blue;")
Now for the formatting of the symbols at each data point:
unfortunately there seems to be no way other than to format each symbol separately:
lineChart.lookupAll(".default-color0.chart-line-symbol").asScala foreach { node => node.setStyle("-fx-background-color: blue, white;") }
For this to run you need import scala.collection.JavaConverters._ for the conversion from a Java set to a Scala set.
One can also make all the data points of a single data set invisible, for example:
lineChart.lookupAll(".default-color1.chart-line-symbol").asScala foreach { node => node.setVisible(false) }
To say this is a nice solution would be an exaggeration.
And it has the big disadvantage that you have to recolour or reformat every symbol after adding a new data point to one of the series in lineChartBuffer. If you don't, the new symbols will get the standard colours and settings.
The lines keep their style once they have been recoloured; I can't say why.
But on the plus side: you can always reformat the curves in a LineChart afterwards like this!
Specifically, this is from an .ico file, so there is no "transparent" "info" attribute like you would get in a GIF. The example below illustrates converting Yahoo!'s favicon to a PNG using the correct transparency index of 0, which I guessed. How can I detect that the ICO is in fact transparent, and that the transparency index is 0?
import urllib2
import Image
import StringIO
resp = urllib2.urlopen("http://www.yahoo.com/favicon.ico")
image = Image.open(StringIO.StringIO(resp.read()))
f = file("test.png", "wb")   # binary mode, so the PNG bytes are written intact
# I guessed that the transparent index is 0. How to
# determine it correctly?
image.save(f, "PNG", quality=95, transparency=0)
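For what it's worth, a hedged way to probe what PIL itself knows: for palette ('P') images, a decoded transparency index would normally surface under the 'transparency' key of image.info, so inspecting it shows whether PIL recovered anything from the ICO at all:

print image.mode                      # 'P' means a palette image
print image.info.get("transparency")  # the index, if PIL decoded one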
It looks like someone recognized that PIL doesn't really read ICO correctly (I can see the same thing after reconciling its source code with some research on the ICO format: there is an AND bitmap which determines transparency) and came up with this extension:
http://www.djangosnippets.org/snippets/1287/
Since this is useful for non-Django applications, I've reposted it here with a few tweaks to its exception throws:
import operator
import struct

from PIL import BmpImagePlugin, PngImagePlugin, Image


def load_icon(file, index=None):
    '''
    Load Windows ICO image.

    See http://en.wikipedia.org/w/index.php?oldid=264332061 for file format
    description.
    '''
    if isinstance(file, basestring):
        file = open(file, 'rb')

    try:
        header = struct.unpack('<3H', file.read(6))
    except:
        raise IOError('Not an ICO file')

    # Check magic
    if header[:2] != (0, 1):
        raise IOError('Not an ICO file')

    # Collect icon directories
    directories = []
    for i in xrange(header[2]):
        directory = list(struct.unpack('<4B2H2I', file.read(16)))
        for j in xrange(3):
            if not directory[j]:
                directory[j] = 256
        directories.append(directory)

    if index is None:
        # Select best icon
        directory = max(directories, key=operator.itemgetter(slice(0, 3)))
    else:
        directory = directories[index]

    # Seek to the bitmap data
    file.seek(directory[7])

    prefix = file.read(16)
    file.seek(-16, 1)

    if PngImagePlugin._accept(prefix):
        # Windows Vista icon with PNG inside
        image = PngImagePlugin.PngImageFile(file)
    else:
        # Load XOR bitmap
        image = BmpImagePlugin.DibImageFile(file)
        if image.mode == 'RGBA':
            # Windows XP 32-bit color depth icon without AND bitmap
            pass
        else:
            # Patch up the bitmap height
            image.size = image.size[0], image.size[1] >> 1
            d, e, o, a = image.tile[0]
            image.tile[0] = d, (0, 0) + image.size, o, a

            # Calculate AND bitmap dimensions. See
            # http://en.wikipedia.org/w/index.php?oldid=264236948#Pixel_storage
            # for description
            offset = o + a[1] * image.size[1]
            stride = ((image.size[0] + 31) >> 5) << 2
            size = stride * image.size[1]

            # Load AND bitmap
            file.seek(offset)
            string = file.read(size)
            mask = Image.fromstring('1', image.size, string, 'raw',
                                    ('1;I', stride, -1))

            image = image.convert('RGBA')
            image.putalpha(mask)

    return image
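A short usage sketch (my wiring of load_icon() back to the favicon example from the question, using the same Python 2 era libraries; untested here). Because the AND mask has been converted into a real alpha channel, no transparency index has to be guessed when saving:

import urllib2
import StringIO

resp = urllib2.urlopen("http://www.yahoo.com/favicon.ico")
icon = load_icon(StringIO.StringIO(resp.read()))

# the mask is already baked in as alpha, so a plain save suffices
icon.save("test.png", "PNG")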