I have a square in Unity with a size of 500x500 (slightly transparent).
Additionally, I want to take a snapshot of this area, so I wrote the following code for taking a picture (the area is 500x500):
using System.IO;
using UnityEngine;

public class CameraShot : MonoBehaviour
{
    private int width = 500;
    private int height = 500;
    private string saveFolder = "/picture";
    private string saveType = ".png";

    public void MakePicture()
    {
        var texture = SetClipping();
        EncodeAndSafe(texture);
    }

    private Texture2D SetClipping()
    {
        // Center a width x height rect on the screen and read those pixels back.
        int x = (Screen.width - width) / 2;
        int y = (Screen.height - height) / 2;
        Debug.Log("[AR_Recognition] CameraShot: " + Screen.dpi);

        var tex = new Texture2D(width, height, TextureFormat.RGB24, false);
        Rect rect = new Rect(x, y, width, height);
        tex.ReadPixels(rect, 0, 0);
        tex.Apply();
        return tex;
    }

    private void EncodeAndSafe(Texture2D texture)
    {
        int count = 1;
        var bytes = texture.EncodeToPNG();
        Destroy(texture);

        string savePath = Application.persistentDataPath + saveFolder;
        // Find the first file name that is not taken yet.
        while (true)
        {
            if (File.Exists(savePath + count + saveType))
            {
                count++;
            }
            else
            {
                File.WriteAllBytes(savePath + count + saveType, bytes);
                Debug.Log("[AR_Recognition] CameraShot: " + savePath + count + saveType);
                break;
            }
        }
    }
}
Now I have the following issue: the picture taken does not match the 500x500 square, and I cannot figure out why.
Thank you.
EDIT: The Canvas information
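A likely factor is the Canvas Scaler: with a scaled canvas the UI square is not guaranteed to cover exactly 500x500 screen pixels. A minimal sketch, assuming the square is a UI element with a RectTransform (here called squareRect) on a Screen Space - Overlay canvas, that derives the capture rect from the element itself instead of hard-coding 500x500:

// Sketch only: compute the on-screen pixel rect of the UI square from its
// world corners, so the capture matches what the Canvas Scaler actually renders.
private Rect GetScreenRect(RectTransform squareRect)
{
    Vector3[] corners = new Vector3[4];
    squareRect.GetWorldCorners(corners);   // bottom-left, top-left, top-right, bottom-right
    // For Screen Space - Overlay the world corners are already in pixels;
    // for Screen Space - Camera you would convert each corner with
    // RectTransformUtility.WorldToScreenPoint(uiCamera, corner) first.
    float x = corners[0].x;
    float y = corners[0].y;
    float width = corners[2].x - corners[0].x;
    float height = corners[2].y - corners[0].y;
    return new Rect(x, y, width, height);
}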
When I create meshes and textures in Unity on each frame (30 fps), it feels like Unity doesn't release this data from memory after use.
Here is my code:
private bool UpdateFrame(int frameIdx)
{
    bool result = true;
    int readyBuffSize = DecoderAPI.stream_get_ready_buffer_size(m_stream);
    if (m_currMeshFrameIndex != frameIdx
        && readyBuffSize > 0)
    {
        m_currMeshFrameIndex = frameIdx;
        IntPtr frame = DecoderAPI.stream_get_next_frame_obj(m_stream);
        if (frame == IntPtr.Zero)
        {
            result = false;
        }
        else
        {
            long sequentialFrameIdx = DecoderAPI.get_sequential_number(frame);
            DebugMethod("UNITY UpdateFrame", $"readyBuffSize :: {readyBuffSize}");
            DebugMethod("UNITY UpdateFrame", $"sequentialFrameIdx :: {sequentialFrameIdx}");

            Mesh releaseFormer = m_meshFilter.mesh;
            m_meshFilter.mesh = CrteateMesh(frame);

            Texture2D texture = CreateTexture(frame);
            m_meshRenderer.material.SetTexture("_MainTex", texture);

            DecoderAPI.stream_release_frame_obj(m_stream, frame);

            Destroy(releaseFormer); // does not seem to help: even when there are no more allocations in C++ the process grows endlessly
        }
    }
    return result;
}
private Mesh CrteateMesh(IntPtr frame)
{
    Mesh mesh = new Mesh();

    //Vertices***
    int vertexCount = DecoderAPI.frame_get_vertex_count(frame);
    byte[] xyzBytes = new byte[vertexCount * 3 * 4];
    IntPtr xyz = DecoderAPI.frame_get_vertex_xyz(frame);
    Vector3[] vertices = new Vector3[vertexCount];

    GCHandle handle = GCHandle.Alloc(vertices, GCHandleType.Pinned);
    IntPtr pointer = handle.AddrOfPinnedObject();
    Marshal.Copy(xyz, xyzBytes, 0, xyzBytes.Length);
    Marshal.Copy(xyzBytes, 0, pointer, xyzBytes.Length);
    handle.Free();

    mesh.vertices = vertices;
    //***

    //Faces***
    int faceCount = DecoderAPI.frame_face_count(frame);
    int trisArrSize = faceCount * 3;
    int[] tris = new int[trisArrSize];
    IntPtr indices = DecoderAPI.frame_face_indices(frame);
    Marshal.Copy(indices, tris, 0, trisArrSize);
    mesh.triangles = tris;
    //***

    mesh.RecalculateNormals();

    //UV***
    int uvCount = DecoderAPI.frame_get_uv_count(frame);
    IntPtr uvData = DecoderAPI.frame_get_uv_data(frame);
    int uvArrSize = uvCount * 2;
    float[] uvArr = new float[uvArrSize];
    Vector2[] uv = new Vector2[uvCount];
    Marshal.Copy(uvData, uvArr, 0, uvArrSize);
    for (int i = 0; i < uvCount; i++)
    {
        Vector2 result = new Vector2(uvArr[i * 2], uvArr[i * 2 + 1]) * new Vector2(1, -1);
        uv[i] = result;
    }
    mesh.uv = uv;
    //***

    if (vertexCount != uvCount)
    {
        long frameId = DecoderAPI.get_sequential_number(frame);
        DebugMethod("UNITY CrteateMesh", $"HERE : in frame id :: {frameId}, vertexCount : {vertexCount}, uvCount : {uvCount}");
    }
    return mesh;
}
private Texture2D CreateTexture(IntPtr frame)
{
    IntPtr textureObj = DecoderAPI.frame_get_texture_obj(frame);
    DecoderAPI.TextureInfo textureInfo = DecoderAPI.texture_get_info(textureObj);
    int width = textureInfo.width;
    int height = textureInfo.height;
    int channels = textureInfo.channels;
    int stride = textureInfo.stride;
    //DecoderAPI.ColorType colorType = textureInfo.color_type;
    IntPtr pixels = textureInfo.pixels;

    Texture2D texture = new Texture2D(width, height, TextureFormat.RGB24, false);
    //Texture2D texture = new Texture2D(width, height, TextureFormat.DXT5, false);
    texture.LoadRawTextureData(pixels, width * channels * height);
    texture.Apply();
    return texture;
}
So, what I do is: I create a mesh and a texture for each frame, use them, and then I expect Unity to release them from memory afterwards, but it doesn't. OK, I found that Destroy(releaseFormer) should help, but it makes no difference: I still see in Task Manager that memory grows endlessly...
As a test I tried the following: I start my C++ code and generate, let's say, 100 frames, then I stop it (so my C++ side doesn't allocate anything anymore), and memory still keeps growing. What I expect is: even if Unity doesn't release data I no longer need, I loaded 100 frames and that is it, so why does memory continue to grow?
The question is: how do I release from memory all those frames that I no longer need?
EDIT
I have changed this method and added Destroy in the proper order:
private bool UpdateFrame(int frameIdx)
{
    bool result = true;
    int readyBuffSize = -1;
    if (m_stream != IntPtr.Zero)
    {
        readyBuffSize = DecoderAPI.stream_get_ready_buffer_size(m_stream);
    }
    if (m_currMeshFrameIndex != frameIdx
        && readyBuffSize > 0)
    {
        m_currMeshFrameIndex = frameIdx;
        IntPtr frame = DecoderAPI.stream_get_next_frame_obj(m_stream);
        if (frame == IntPtr.Zero)
        {
            result = false;
        }
        else
        {
            long sequentialFrameIdx = DecoderAPI.frame_get_sequential_number(frame);
            DebugMethod("UNITY UpdateFrame", $"readyBuffSize :: {readyBuffSize}");
            DebugMethod("UNITY UpdateFrame", $"sequentialFrameIdx :: {sequentialFrameIdx}");

            if (m_meshFilter.mesh != null)
            {
                Destroy(m_meshFilter.mesh);
            }
            m_meshFilter.mesh = CrteateMesh(frame);

            if (m_texture != null)
            {
                Destroy(m_texture);
            }
            m_texture = CreateTexture(frame);
            m_meshRenderer.material.SetTexture("_MainTex", m_texture);

            if (m_stream != IntPtr.Zero)
            {
                DecoderAPI.stream_release_frame_obj(m_stream, frame);
            }
        }
    }
    return result;
}
releaseFormer is the mesh, right? Did you try calling Destroy on the texture object itself?
Another thread suggested Resources.UnloadUnusedAssets().
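For completeness, a minimal sketch of that suggestion, assuming it runs on the same MonoBehaviour (or is folded into an existing Update); UnloadUnusedAssets is expensive, so it shouldn't run every frame:

// Sketch only: periodically ask Unity to free assets that no longer have
// references. Don't call this every frame.
private void Update()
{
    if (Time.frameCount % 300 == 0)   // roughly every few seconds at 60 fps
    {
        Resources.UnloadUnusedAssets();
    }
}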
Personally, I'd try to do this with a RenderTexture, especially if the texture size doesn't change too often, though that might not be possible for your use case.
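Building on that, a minimal sketch that reuses a single Texture2D across frames and only reallocates when the decoded size changes; m_texture and m_meshRenderer are the fields already used in the edited code above, while the helper name and parameters are made up for illustration:

// Sketch only: upload new pixel data into one long-lived Texture2D instead of
// constructing a new texture per frame, reallocating only on size changes.
private void UploadTexture(int width, int height, IntPtr pixels, int byteCount)
{
    if (m_texture == null || m_texture.width != width || m_texture.height != height)
    {
        if (m_texture != null)
        {
            Destroy(m_texture);   // release the old texture
        }
        m_texture = new Texture2D(width, height, TextureFormat.RGB24, false);
        m_meshRenderer.material.SetTexture("_MainTex", m_texture);
    }
    m_texture.LoadRawTextureData(pixels, byteCount);   // overwrite in place
    m_texture.Apply(false);
}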
I need to use the Bitmap class from System.Drawing. This is a function that works fine on the Windows platform. But after I tried to run it on Xamarin.Forms and installed the System.Drawing NuGet package, the program compiles without errors.
However, when running the program I receive an error. It somehow seems to point to System.Drawing from Windows, not the System.Drawing from the NuGet package.
What I need to do is get a photo from the camera and print it.
Below is the code to print. The problem is with the "Bitmap" converter.
I tried several NuGet packages, none of which worked:
System.Drawing.Common
Fast-Bitmap
Bitmap.Net
public byte[] PrintImage(byte[] PHOTO)
{
    Bitmap bmp;
    using (var ms = new MemoryStream(PHOTO))
    {
        bmp = new Bitmap(ms);
    }
    BitmapData data = GetBitmapData(bmp);
    BitArray dots = data.Dots;
    byte[] width = BitConverter.GetBytes(data.Width);

    int offset = 0;
    MemoryStream stream = new MemoryStream();
    BinaryWriter bw = new BinaryWriter(stream);

    // center command
    bw.Write(27);
    bw.Write('a');
    bw.Write(1);

    // print image
    bw.Write((char)0x1B);
    bw.Write('#');
    bw.Write((char)0x1B);
    bw.Write('3');
    bw.Write((byte)24);

    while (offset < data.Height)
    {
        bw.Write((char)0x1B);
        bw.Write('*');          // bit-image mode
        bw.Write((byte)33);     // 24-dot double-density
        bw.Write(width[0]);     // width low byte
        bw.Write(width[1]);     // width high byte

        for (int x = 0; x < data.Width; ++x)
        {
            for (int k = 0; k < 3; ++k)
            {
                byte slice = 0;
                for (int b = 0; b < 8; ++b)
                {
                    int y = (((offset / 8) + k) * 8) + b;
                    // Calculate the location of the pixel.
                    // It'll be at (y * width) + x.
                    int i = (y * data.Width) + x;
                    // If the image is shorter than 24 dots.
                    bool v = false;
                    if (i < dots.Length)
                    {
                        v = dots[i];
                    }
                    slice |= (byte)((v ? 1 : 0) << (7 - b));
                }
                bw.Write(slice);
            }
        }
        offset += 24;
        bw.Write((char)0x0A);
    }

    // Restore the line spacing to the default of 30 dots.
    bw.Write((char)0x1B);
    bw.Write('3');
    bw.Write((byte)30);

    bw.Flush();
    byte[] bytes = stream.ToArray();
    return bytes; // logo + Encoding.Default.GetString(bytes);
}
public BitmapData GetBitmapData(Bitmap bmp) // (string bmpFileName)
{
    //using (var bitmap = (Bitmap)Bitmap.FromFile(bmpFileName))
    using (var bitmap = bmp)
    {
        var threshold = 127;
        var index = 0;
        double multiplier = 570; // this depends on your printer
        double scale = (double)(multiplier / (double)bitmap.Width);
        int xheight = (int)(bitmap.Height * scale);
        int xwidth = (int)(bitmap.Width * scale);
        var dimensions = xwidth * xheight;
        var dots = new BitArray(dimensions);

        for (var y = 0; y < xheight; y++)
        {
            for (var x = 0; x < xwidth; x++)
            {
                var _x = (int)(x / scale);
                var _y = (int)(y / scale);
                var color = bitmap.GetPixel(_x, _y);
                var luminance = (int)(color.R * 0.3 + color.G * 0.59 + color.B * 0.11);
                dots[index] = (luminance < threshold);
                index++;
            }
        }

        return new BitmapData()
        {
            Dots = dots,
            Height = (int)(bitmap.Height * scale),
            Width = (int)(bitmap.Width * scale)
        };
    }
}
public class BitmapData
{
    public BitArray Dots { get; set; }
    public int Height { get; set; }
    public int Width { get; set; }
}
The error occurs when the function is called as:
byte[] _buffer = PrintImage(FOTO);
The error:
"Could not resolve type with token 01000119 from typeref (expected class 'System.Drawing.Bitmap' in assembly 'System.Drawing.Common, Version=4.0.1.0, Culture=neutral, PublicKeyToken=cc7b13ffcd2ddd51')"
I'm trying to achieve a chart similar to the attached screenshot. The problem is: how can I put the values (70, 76) in each arc of the chart? I'm using the Microcharts library.
The attached screenshot is from the SmartHotel360 Xamarin demo app. It would be really helpful if someone could point me in the right direction.
void PopulateChart()
{
    var data = new[]
    {
        new Microcharts.Entry(50)
        {
            ValueLabel = "50",
            Color = SKColor.Parse("#104950")
        },
        new Microcharts.Entry(70)
        {
            ValueLabel = "70",
            Color = SKColor.Parse("#F7A4B9")
        },
        new Microcharts.Entry(90)
        {
            ValueLabel = "90",
            Color = SKColor.Parse("#0084b4")
        }
    };

    this.chartView.Chart = new TemperatureChart() { Entries = data };
}
using Microcharts;
using SkiaSharp;
using System;
using System.Linq;

namespace chart
{
    public class TemperatureChart : Chart
    {
        public TemperatureChart()
        {
            BackgroundColor = SKColor.Parse("#F6F1E9");
        }

        public float CaptionMargin { get; set; } = 12;
        public float LineSize { get; set; } = 18;
        public float StartAngle { get; set; } = -180;

        private float AbsoluteMinimum => Entries.Select(x => x.Value).Concat(new[] { MaxValue, MinValue, InternalMinValue ?? 0 }).Min(x => Math.Abs(x));
        private float AbsoluteMaximum => Entries.Select(x => x.Value).Concat(new[] { MaxValue, MinValue, InternalMinValue ?? 0 }).Max(x => Math.Abs(x));
        private float ValueRange => AbsoluteMaximum - AbsoluteMinimum;

        public override void DrawContent(SKCanvas canvas, int width, int height)
        {
            var sumValue = Entries.Sum(x => Math.Abs(x.Value));
            var radius = (Math.Min(width, height) - (2 * Margin)) / 2;
            var cx = width / 2;
            var cy = Convert.ToInt32(height / 1.25);
            var lineWidth = (LineSize < 0) ? (radius / ((Entries.Count() + 1) * 2)) : LineSize;
            var radiusSpace = lineWidth * 4;

            foreach (var entry in Entries.OrderByDescending(e => e.Value))
            {
                DrawChart(canvas, entry, radiusSpace, cx, cy, lineWidth);
            }

            DrawCaption(canvas, cx, cy, radiusSpace);
        }

        public void DrawChart(SKCanvas canvas, Entry entry, float radius, int cx, int cy, float strokeWidth)
        {
            using (var paint = new SKPaint
            {
                Style = SKPaintStyle.Stroke,
                StrokeWidth = strokeWidth,
                StrokeCap = SKStrokeCap.Round,
                Color = entry.Color,
                IsAntialias = true
            })
            {
                using (SKPath path = new SKPath())
                {
                    var sweepAngle = 180 * (Math.Abs(entry.Value) - AbsoluteMinimum) / ValueRange;
                    path.AddArc(SKRect.Create(cx - radius, cy - radius, 2 * radius, 2 * radius), StartAngle, sweepAngle);
                    canvas.DrawPath(path, paint);
                }
            }
        }

        private void DrawCaption(SKCanvas canvas, int cx, int cy, float radius)
        {
            var minimum = 0;
            var medium = Math.Round(Entries.Max(e => e.Value) / 2);
            var maximum = Entries.Max(e => e.Value);

            canvas.DrawCaptionLabels(string.Empty, SKColor.Empty, $"{minimum}°", SKColors.Black, LabelTextSize, new SKPoint(cx - radius - LineSize - CaptionMargin, cy), SKTextAlign.Center);
            canvas.DrawCaptionLabels(string.Empty, SKColor.Empty, $"{medium}°", SKColors.Black, LabelTextSize, new SKPoint(cx, cy - radius - LineSize), SKTextAlign.Center);
            canvas.DrawCaptionLabels(string.Empty, SKColor.Empty, $"{maximum}°", SKColors.Black, LabelTextSize, new SKPoint(cx + radius + LineSize + CaptionMargin, cy), SKTextAlign.Center);
        }
    }
}
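One possible way to get the values onto the arcs (this is a sketch, not part of the Microcharts API) is to add a method like the following to TemperatureChart and call it from DrawChart right after canvas.DrawPath(path, paint); it draws the entry's ValueLabel just beyond the end of its arc:

// Sketch only: place the entry's ValueLabel at the end of its arc.
private void DrawValueLabel(SKCanvas canvas, Entry entry, float radius, int cx, int cy)
{
    var sweepAngle = 180 * (Math.Abs(entry.Value) - AbsoluteMinimum) / ValueRange;
    // StartAngle is -180, so the arc ends at (StartAngle + sweepAngle) degrees.
    var endAngle = (StartAngle + sweepAngle) * Math.PI / 180.0;   // to radians
    var labelRadius = radius + LineSize + CaptionMargin;
    var x = cx + (float)(labelRadius * Math.Cos(endAngle));
    var y = cy + (float)(labelRadius * Math.Sin(endAngle));

    using (var textPaint = new SKPaint
    {
        Color = entry.Color,
        TextSize = LabelTextSize,
        TextAlign = SKTextAlign.Center,
        IsAntialias = true
    })
    {
        canvas.DrawText(entry.ValueLabel, x, y, textPaint);
    }
}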
I'm trying to build a program that takes your photo and places it on a different background, like a monument or so. So far, I have been able to turn the camera on when I start the project with this code:
webcamTexture = new WebCamTexture();
rawImage.texture = webcamTexture;
rawImage.material.mainTexture = webcamTexture;
webcamTexture.Play();
Texture2D PhotoTaken = new Texture2D (webcamTexture.width, webcamTexture.height);
PhotoTaken.SetPixels (webcamTexture.GetPixels ());
PhotoTaken.Apply ();
However, I can't take a screenshot or photo because it always ends up all black. I've tried different approaches but nothing works. Can someone please help? Thanks.
EDIT
After some tries, this is the code I have:
WebCamTexture webcamTexture;
public RawImage rawImage;
public Button yourButton;   // assigned in the Inspector

void Start()
{
    webcamTexture = new WebCamTexture();
    rawImage.texture = webcamTexture;
    rawImage.material.mainTexture = webcamTexture;
    webcamTexture.Play();

    RenderTexture texture = new RenderTexture(webcamTexture.width, webcamTexture.height, 0);
    Graphics.Blit(webcamTexture, texture);

    Button btn = yourButton.GetComponent<Button>();
    btn.onClick.AddListener(OnClick);
}

public IEnumerator Coroutine()
{
    yield return new WaitForEndOfFrame();
}

public void OnClick()
{
    var width = 767;
    var height = 575;
    Texture2D texture = new Texture2D(width, height);
    texture.ReadPixels(new Rect(0, 0, width, height), 0, 0);
    texture.Apply();

    // Encode texture into PNG
    var bytes = texture.EncodeToPNG();
    //Destroy(texture);
    File.WriteAllBytes(Application.dataPath + "/../SavedScreen.png", bytes);
}
and with this next code a screenshot is taken, but it captures the whole screen, not just a part of it:
void Start()
{
    // Set the playback framerate!
    // (real time doesn't influence time anymore)
    Time.captureFramerate = frameRate;

    // Find a folder that doesn't exist yet by appending numbers!
    realFolder = folder;
    int count = 1;
    while (System.IO.Directory.Exists(realFolder))
    {
        realFolder = folder + count;
        count++;
    }

    // Create the folder
    System.IO.Directory.CreateDirectory(realFolder);
}

void Update()
{
    // name is "realFolder/shot 0005.png"
    var name = string.Format("{0}/shot {1:D04}.png", realFolder, Time.frameCount);

    // Capture the screenshot
    Application.CaptureScreenshot(name, sizeMultiplier);
}
You can take a screenshot like this in Unity (note that in newer Unity versions this call has been replaced by ScreenCapture.CaptureScreenshot):
Application.CaptureScreenshot("Screenshot.png");
Reference
EDIT 1
To take a screenshot on a specific part of the screen use the following script:
var width = 400;
var height = 300;
var startX = 200;
var startY = 100;
var tex = new Texture2D(width, height, TextureFormat.RGB24, false);
tex.ReadPixels(new Rect(startX, startY, width, height), 0, 0);
tex.Apply();

// Encode texture into PNG
var bytes = tex.EncodeToPNG();
Destroy(tex);
File.WriteAllBytes(Application.dataPath + "/../SavedScreen.png", bytes);
Reference
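Note that ReadPixels reads from the frame buffer, so it has to run after rendering has finished and after the webcam has delivered at least one frame. A minimal sketch, assuming it lives on the same component as webcamTexture and is started from the button handler with StartCoroutine(CaptureWhenReady()):

// Sketch only: wait for the end of the frame (and for the webcam to produce
// real data) before reading pixels, otherwise the result can come back black.
// Requires using System.Collections; and using System.IO;.
private IEnumerator CaptureWhenReady()
{
    // WebCamTexture commonly reports a tiny placeholder size (16x16)
    // until the first real frame arrives.
    while (webcamTexture.width <= 16)
        yield return null;

    yield return new WaitForEndOfFrame();

    var tex = new Texture2D(webcamTexture.width, webcamTexture.height, TextureFormat.RGB24, false);
    tex.SetPixels(webcamTexture.GetPixels());   // copy the current webcam frame
    tex.Apply();

    File.WriteAllBytes(Application.dataPath + "/../SavedScreen.png", tex.EncodeToPNG());
    Destroy(tex);
}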
I'm using iTextSharp to join multiple PDF documents and add a footer.
My code works fine, except for landscape pages: it isn't detecting the page rotation, so the footer is not centered on landscape pages.
public static int AddPagesFromStream(Document document, PdfCopy pdfCopy, Stream m, bool addFooter, int detailPages, string footer, int footerPageNumOffset, int numPages, string pageLangString, string printLangString)
{
    CreateFont();
    try
    {
        m.Seek(0, SeekOrigin.Begin);
        var reader = new PdfReader(m);

        // get page count
        var pdfPages = reader.NumberOfPages;
        var i = 0;

        // add pages
        while (i < pdfPages)
        {
            i++;

            // import page with pdfcopy
            var page = pdfCopy.GetImportedPage(reader, i);

            // get page center
            float posX;
            float posY;
            var rotation = page.BoundingBox.Rotation;
            if (rotation == 0 || rotation == 180)
            {
                posX = page.Width / 2;
                posY = 0;
            }
            else
            {
                posX = page.Height / 2;
                posY = 20f;
            }

            var ps = pdfCopy.CreatePageStamp(page);
            var cb = ps.GetOverContent();

            // add footer
            cb.SetColorFill(BaseColor.WHITE);
            var gs1 = new PdfGState { FillOpacity = 0.8f };
            cb.SetGState(gs1);
            cb.Rectangle(0, 0, document.PageSize.Width, 46f + posY);
            cb.Fill();

            // Text
            cb.SetColorFill(BaseColor.BLACK);
            cb.SetFontAndSize(baseFont, 7);
            cb.BeginText();

            // create text
            var pages = string.Format(pageLangString, i + footerPageNumOffset, numPages);
            cb.ShowTextAligned(PdfContentByte.ALIGN_CENTER, printLangString, posX, 40f + posY, 0f);
            cb.ShowTextAligned(PdfContentByte.ALIGN_CENTER, footer, posX, 28f + posY, 0f);
            cb.ShowTextAligned(PdfContentByte.ALIGN_CENTER, pages, posX, 20f + posY, 0f);
            cb.EndText();
            ps.AlterContents();

            // add page to new pdf
            pdfCopy.AddPage(page);
        }

        // close PdfReader
        reader.Close();

        // return number of pages
        return i;
    }
    catch (Exception e)
    {
        Console.WriteLine(e);
        return 0;
    }
}
How do I detect the page rotation (e.g. landscape) in this case? The given example works for PdfReader but not for PdfCopy.
Edit:
Why do I need PdfCopy? I tried copying a Word PDF export. Some Word hyperlinks will not work when you try to copy pages with PdfReader; only PdfCopy transfers all the needed page information.
Edit: (SOLVED)
You need to use reader.GetPageRotation(i);
Solved code:
public static int AddPagesFromStream(Document document, PdfCopy pdfCopy, Stream m, bool addFooter, int detailPages, string footer, int footerPageNumOffset, int numPages, string pageLangString, string printLangString)
{
    CreateFont();
    try
    {
        m.Seek(0, SeekOrigin.Begin);
        var reader = new PdfReader(m);

        // get page count
        var pdfPages = reader.NumberOfPages;
        var i = 0;

        // add pages
        while (i < pdfPages)
        {
            i++;

            // import page with pdfcopy
            var page = pdfCopy.GetImportedPage(reader, i);

            // get page center
            float posX;
            float posY;
            var rotation = reader.GetPageRotation(i);
            if (rotation == 0 || rotation == 180)
            {
                posX = page.Width / 2;
                posY = 0;
            }
            else
            {
                posX = page.Height / 2;
                posY = 20f;
            }

            var ps = pdfCopy.CreatePageStamp(page);
            var cb = ps.GetOverContent();

            // add footer
            cb.SetColorFill(BaseColor.WHITE);
            var gs1 = new PdfGState { FillOpacity = 0.8f };
            cb.SetGState(gs1);
            cb.Rectangle(0, 0, document.PageSize.Width, 46f + posY);
            cb.Fill();

            // Text
            cb.SetColorFill(BaseColor.BLACK);
            cb.SetFontAndSize(baseFont, 7);
            cb.BeginText();

            // create text
            var pages = string.Format(pageLangString, i + footerPageNumOffset, numPages);
            cb.ShowTextAligned(PdfContentByte.ALIGN_CENTER, printLangString, posX, 40f + posY, 0f);
            cb.ShowTextAligned(PdfContentByte.ALIGN_CENTER, footer, posX, 28f + posY, 0f);
            cb.ShowTextAligned(PdfContentByte.ALIGN_CENTER, pages, posX, 20f + posY, 0f);
            cb.EndText();
            ps.AlterContents();

            // add page to new pdf
            pdfCopy.AddPage(page);
        }

        // close PdfReader
        reader.Close();

        // return number of pages
        return i;
    }
    catch (Exception e)
    {
        Console.WriteLine(e);
        return 0;
    }
}
In your code sample, page is of type PdfImportedPage. You're asking for its Bounding Box. PdfImportedPage is of type PdfTemplate, so you're asking for the Bounding Box of an XObject. I doubt that will work.
You should ask the reader object for the rotation of the page. I guess you've already discovered that yourself because you say "The given example works for PdfReader, but not for PdfCopy".
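For illustration, a minimal sketch of that lookup inside the page loop, assuming iTextSharp 5.x, taking both the rotation and the rotated page size from the reader instead of from the imported page:

// Sketch only: query rotation and size from the PdfReader, not from the
// imported page (an XObject carries no page rotation).
var rotation = reader.GetPageRotation(i);              // 0, 90, 180 or 270
var pageSize = reader.GetPageSizeWithRotation(i);      // size as the page is displayed

float posX, posY;
if (rotation == 0 || rotation == 180)
{
    posX = pageSize.Width / 2;
    posY = 0f;
}
else
{
    posX = pageSize.Width / 2;   // already rotated, so Width is the long edge here
    posY = 20f;
}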