How to get correct RGB color values from PNG with PLTE palette using NSBitmapImageRep in Swift? - swift

Given a PNG image, I would like to get the RGB color at a given pixel using Swift on macOS.
This is simply done by:
// Decode the base64-encoded 1x1 RGB PNG and wrap it in an NSImage.
let pngData = Data(base64Encoded: "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAADElEQVQImWP4z8AAAAMBAQCc479ZAAAAAElFTkSuQmCC")!
let image = NSImage(data: pngData)!
// Render into a bitmap representation so the individual pixel can be sampled.
let cgImage = image.cgImage(forProposedRect: nil, context: nil, hints: [:])!
let bitmap = NSBitmapImageRep(cgImage: cgImage)
// Read the single pixel and print its RGB components.
let pixel = bitmap.colorAt(x: 0, y: 0)!
print(pixel.redComponent, pixel.greenComponent, pixel.blueComponent, separator: " ")
// 1.0 0.0 0.0
This minimal example contains a one pixel PNG in RGB encoding with a red pixel in base64 encoding to make this easily reproducible.
printf iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAADElEQVQImWP4z8AAAAMBAQCc479ZAAAAAElFTkSuQmCC | base64 --decode | pngcheck -v
File: stdin (69 bytes)
chunk IHDR at offset 0x0000c, length 13
1 x 1 image, 24-bit RGB, non-interlaced
chunk IDAT at offset 0x00025, length 12
zlib: deflated, 256-byte window, default compression
chunk IEND at offset 0x0003d, length 0
No errors detected in stdin (3 chunks, -133.3% compression).
When changing this PNG to an indexed PNG with a PLTE Palette
printf iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAMAAAAoyzS7AAAAA1BMVEX/AAAZ4gk3AAAACklEQVQImWNgAAAAAgAB9HFkpgAAAABJRU5ErkJggg== | base64 --decode | pngcheck -v
File: stdin (82 bytes)
chunk IHDR at offset 0x0000c, length 13
1 x 1 image, 8-bit palette, non-interlaced
chunk PLTE at offset 0x00025, length 3: 1 palette entry
chunk IDAT at offset 0x00034, length 10
zlib: deflated, 256-byte window, default compression
chunk IEND at offset 0x0004a, length 0
No errors detected in stdin (4 chunks, -600.0% compression).
the snippet above changes to
// Decode the base64-encoded 1x1 indexed (PLTE palette) PNG and wrap it in an NSImage.
let pngData = Data(base64Encoded: "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAMAAAAoyzS7AAAAA1BMVEX/AAAZ4gk3AAAACklEQVQImWNgAAAAAgAB9HFkpgAAAABJRU5ErkJggg==")!
let image = NSImage(data: pngData)!
// Render into a bitmap representation so the individual pixel can be sampled.
let cgImage = image.cgImage(forProposedRect: nil, context: nil, hints: [:])!
let bitmap = NSBitmapImageRep(cgImage: cgImage)
// Read the single pixel and print its RGB components.
let pixel = bitmap.colorAt(x: 0, y: 0)!
print(pixel.redComponent, pixel.greenComponent, pixel.blueComponent, separator: " ")
// 0.984313725490196 0.0 0.027450980392156862
It produces the unexpected output with a red component close to one and some non-zero blue component. Where does this change in color come from?

Related

Sending Email From Databricks Notebooks

i want to send email from databricks notebooks, based on this article: https://docs.databricks.com/user-guide/faq/send-email.html
I am following the steps, however I got an error: UnicodeDecodeError: 'utf-8' codec can't decode byte 0x89 in position 0: invalid start byte
And, I think, the reason is that inside the function makeCompatibleImage we have this snippet: val = "&lt;img src='data:image/png;base64,%s'&gt;" % base64.standard_b64encode(png.read()), and probably there is something wrong with base64.standard_b64encode
import numpy as np
import matplotlib.pyplot as plt

# Build a polar bar ("pie slice") chart with N randomly sized wedges.
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)  # wedge start angles
radii = 10 * np.random.rand(N)                          # wedge lengths
width = np.pi / 4 * np.random.rand(N)                   # wedge angular widths

ax = plt.subplot(111, projection='polar')
bars = ax.bar(theta, radii, width=width, bottom=0.0)

# Use custom colors and opacity: color each wedge by its radius.
# FIX: the loop body below had lost its indentation in the original paste,
# which is an IndentationError in Python; it must be indented under the for.
for r, bar in zip(radii, bars):
    bar.set_facecolor(plt.cm.viridis(r / 10.))
    bar.set_alpha(0.5)

# Convert image and append to html array.
# NOTE(review): `html` and `makeCompatibleImage` are defined in another
# notebook cell (see the traceback below) -- confirm they are in scope.
html.append(makeCompatibleImage(ax))
#
UnicodeDecodeError: 'utf-8' codec can't decode byte 0x89 in position 0: invalid start byte
---------------------------------------------------------------------------
UnicodeDecodeError Traceback (most recent call last)
<command-890455078841631> in <module>()
16 bar.set_alpha(0.5)
17 # Convert image add append to html array
---> 18 html.append(makeCompatibleImage(ax))
<command-890455078841625> in makeCompatibleImage(image, withLabel)
11 val = None
12 with open(imageName) as png:
---> 13 val = "<img src='data:image/png;base64,%s'>" % base64.standard_b64encode(png.read())
14
15 displayHTML(val)
/databricks/python/lib/python3.6/codecs.py in decode(self, input, final)
319 # decode input (taking the buffer into account)
320 data = self.buffer + input
--> 321 (result, consumed) = self._buffer_decode(data, self.errors, final)
322 # keep undecoded input until the next call
323 self.buffer = data[consumed:]
UnicodeDecodeError: 'utf-8' codec can't decode byte 0x89 in position 0: invalid start byte
I want to know how I can replicate this article.
Adding the following code in makeCompatibleImage function, to read the file in binary mode, worked for me:
with open(imageName, 'rb') as png:

How to edit exif metadata by graphicsmagick command?

How to edit or add exif metadata by graphicsmagick command? I've tried the following:
gm convert -set EXIF:XPKeywords "bird, sleep, moon" bird.jpg bird2.jpg
My command is executing successfully and the image is creating but the exif metadata is not updating.
I don't believe you can do that with GraphicsMagick, and I would suggest exiv2 like this:
exiv2 -M"add Exif.Image.XPKeywords Ascii 'bird,sleep,moon'" image.jpg
Then you can view them with jhead too:
jhead -v image.jpg
JFIF SOI marker: Units: 0 (aspect ratio) X-density=1 Y-density=1
Exif header 50 bytes long
Exif section in Intel order
(dir has 1 entries)
Windows-XP keywords = "bird,sleep,moon"
Approximate quality factor for qtable 0: 92 (scale 16.28, var 1.13)
Approximate quality factor for qtable 1: 92 (scale 16.20, var 0.15)
JPEG image is 2000w * 2000h, 3 color components, 8 bits per sample
File name : image.jpg
File size : 6968929 bytes
File date : 2015:10:20 09:23:24
Resolution : 2000 x 2000

Sending Jcodec H264 Encoded RTMP Message to Wowza

I am making screen share java based application. I am done with encoding frames into H264 using JCodec java Library. I have Picture data in Byte Buffer.
How I will send these encoded frames to Wowza through rtmp client?
Can Wowza recognize the H264 encoded frames, Encoded by Jcodec library?
Pretty much any of the "flash" media servers will understand h264 data in a stream. You'll need to encode your frames with baseline or main profile and then "package" the encoded bytes into flv streaming format. The first step is creating an AMF video data item, what that means is prefixing and suffixing the h264 encoded byte array based on its "NALU" content; in pseudo code it looks something like this:
if idr
flv[0] = 0x17 // 0x10 key frame; 0x07 h264 codec id
flv[1] = 0x01 // 0 sequence header; 1 nalu; 2 end of seq
flv[2] = 0 // pres offset
flv[3] = 0 // pres offset
flv[4] = 0 // pres offset
flv[5] = 0 // size
flv[6] = 0 // size cont
flv[7] = 0 // size cont
flv[8] = 0 // size cont
else if coded slice
flv[0] = 0x27
flv[1] = 0x01
flv[2] = 0 // pres offset
flv[3] = 0 // pres offset
flv[4] = 0 // pres offset
flv[5] = 0 // size
flv[6] = 0 // size cont
flv[7] = 0 // size cont
flv[8] = 0 // size cont
else if PPS or SPS
.... skipping this here as its really complicated, this is the h264/AVC configuration data
copy(encoded, 0, flv, 9, encoded.length)
flv[flv.length - 1] = 0
The next step is packaging the AMF video data into an RTMP message. I suggest that you look at flazr or one of the android rtmp libraries for details on this step.
I've got a small example project that takes raw encoded h264 and writes it to an flv here if you want to see how its done.

How to divide or cut a YUV file?

I have a YUV file with 150 frames, and I want to divide it into 2 files of 75 frames each. How do I go about doing this? Is there any software to do this?
No specific SW is needed. All you need to do is read/write the number of bytes that corresponds to a frame into a new file. "Normally" the YCbCr format used is subsampled according to 4:2:0, i.e. the chroma samples are reduced by a factor of 2 both horizontally and vertically; meaning that 1 frame in YCbCr 4:2:0 corresponds to
1 frame = width x height x 3 / 2 bytes
If you are on a linux based system, you can use the dd utility to extract the first n-frames into a new file like this:
dd if=input.yuv bs=1 count=$((width*height*3/2*num_frames)) of=output.yuv
for the first 10 frames of a 1080p clip, the above would be:
dd if=input.yuv bs=1 count=$((1920*1080*3/2*10)) of=output.yuv
or
dd if=input.yuv bs=1 count=3110400 of=output.yuv
or use your favorite programming/scripting language to do this.
For example, the following python-scripts writes the first 10 frames to a new file (one frame at a time), tweek it to your needs:
#!/usr/bin/env python
"""Copy the first N frames of a planar YUV 4:2:0 clip into a new file."""


def copy_frames(src, dst, frame_size, num_frames):
    """Append `num_frames` chunks of `frame_size` bytes from `src` to `dst`.

    Stops early (without error) if `src` holds fewer than `num_frames` frames.
    """
    with open(src, 'rb') as fd_in, open(dst, 'wb') as fd_out:
        for _ in range(num_frames):
            data = fd_in.read(frame_size)
            if not data:  # clip shorter than requested -- stop cleanly
                break
            fd_out.write(data)


if __name__ == '__main__':
    f_in = 'BQMall_832x480_60.yuv'
    f_out = 'BQMall_first_10_frames.yuv'
    # One 4:2:0 frame = width*height luma + 2 * (width/2 * height/2) chroma bytes.
    # FIX: use integer division -- `/` yields a float in Python 3, and
    # file.read() rejects a float size with a TypeError.
    f_size = 832 * 480 * 3 // 2
    copy_frames(f_in, f_out, f_size, 10)
I suggest that you do not use bs=1; use one frame as the block size instead (note: no space after bs=, otherwise the shell does not pass the value to dd):
dd if=176x144.yuv bs=$((176 * 144 * 3 / 2)) count=$FrameNo of=output.yuv

How to remove header of a wavfile in Matlab

I need to remove first 1024 bytes of a wave file. I tried to do but I got corrupted/distorted wavfile:
% Strip the first 1024 bytes (the header) from a wave file -- distorted result.
wavFile = fopen(fullFileName, 'r'); % Open file for reading
fseek (wavFile, 1024, -1);% Skip past header, which is 1024 bytes long.
wF = fread (wavFile, inf, 'int16');% 16-bit signed integers
wF = wF(:)';
newWavFile = fopen(strcat('new_',fileNames(fileNo).name), 'w+');% Open output file for writing
% NOTE(review): fwrite defaults to 'uint8' precision, so the int16 samples read
% above are truncated on write -- likely the source of the corruption.
fwrite(newWavFile,wF);
fclose(wavFile);
% NOTE(review): newWavFile is never fclose'd, so the output may be incomplete.
What can be the problem?
I could manage to remove header and correct the distortion by :
% Working version: skip the 1024-byte header, rescale, and write a proper WAV.
wavFile = fopen(fullFileName, 'r');% Open file for reading
fseek (wavFile, 1024, -1);% Skip past header, which is 1024 bytes long.
wF = fread (wavFile, inf, 'int16');% 16-bit signed integers
wF = wF(:)';
wF = 0.8*wF/max(abs(wF));% Scale into [-0.8, 0.8]; wavwrite expects floats in [-1, 1]
newWavFile =strcat('1_',fileNames(fileNo).name);% Output file name (not a handle)
wavwrite(wF,16000,16,newWavFile);% 16 kHz sample rate, 16 bits per sample