I get the following error when I attach the TakePhoto coroutine to a button. Can someone help?
Error:
```
Cannot get pixels when webcam is not running
UnityEngine.WebCamTexture:GetPixels ()
GetCam/<TakePhoto>d__6:MoveNext () (at Assets/GetCam.cs:52)
UnityEngine.GUIUtility:ProcessEvent (int,intptr,bool&)
```
The code I took it from:
```
using UnityEngine;
using System.Collections;
using System.IO;
using UnityEngine.UI;

public class GetCam : MonoBehaviour
{
    WebCamTexture webCam;
    string your_path = "C:\\Users\\Jay\\Desktop";
    public RawImage display;
    public AspectRatioFitter fit;

    public void Start()
    {
        webCam = new WebCamTexture();
        webCam.Play();
        StartCoroutine(TakePhoto());
        display.texture = webCam;
    }

    public void Update()
    {
        float ratio = (float)webCam.width / (float)webCam.height;
        fit.aspectRatio = ratio;
        float ScaleY = webCam.videoVerticallyMirrored ? -1f : 1f;
        display.rectTransform.localScale = new Vector3(1f, ScaleY, 1f);
        int orient = -webCam.videoRotationAngle;
        display.rectTransform.localEulerAngles = new Vector3(0, 0, orient);
    }

    IEnumerator TakePhoto() // Start this Coroutine on some button click
    {
        // NOTE - you almost certainly have to do this here:
        yield return new WaitForEndOfFrame();
        // it's a rare case where the Unity doco is pretty clear,
        // http://docs.unity3d.com/ScriptReference/WaitForEndOfFrame.html
        // be sure to scroll down to the SECOND long example on that doco page
        Texture2D photo = new Texture2D(webCam.width, webCam.height);
        photo.SetPixels(webCam.GetPixels());
        photo.Apply();
        // Encode to a PNG
        byte[] bytes = photo.EncodeToPNG();
        // Write out the PNG. Of course you have to substitute your_path for something sensible
        File.WriteAllBytes(your_path + "photo.png", bytes);
        //lol
    }
}
```
I am making a platform that bounces the player after a certain period of time once the player touches it. I attached this component to a tilemap GameObject, but it doesn't work properly. What's wrong with my code? The code and scriptable object are as follows.
```
using System.Collections;
using System.Collections.Generic;
using UnityEngine;

public class ExplosionPlatform : PlatformBase
{
    public float explosionPower = 20f;
    public float waitTime = 1.5f;
    public float colTime = 0f;
    public bool isDamageOnce = false;
    public Vector2 area = new Vector2(2, 2);
    public Vector2 positionModify = new Vector2(1, 0);

    private void OnCollisionEnter2D(Collision2D collision)
    {
        StartCoroutine(Explosion());
    }

    private IEnumerator Explosion()
    {
        Debug.Log("ASDF");
        yield return new WaitForSeconds(waitTime);
        Collider2D[] player = Physics2D.OverlapBoxAll(transform.position + (Vector3)positionModify, area, 0);
        foreach (Collider2D col in player)
        {
            if (col.CompareTag("Player"))
            {
                col.gameObject.GetComponent<Rigidbody2D>().AddForce(Vector2.up * explosionPower, ForceMode2D.Impulse);
                Damage();
                isDamageOnce = true;
                yield return new WaitForSeconds(3f);
                isDamageOnce = false;
            }
        }
    }

    private void OnDrawGizmos()
    {
        Gizmos.color = Color.blue;
        Gizmos.DrawWireCube(transform.position + (Vector3)positionModify, area);
    }
}
```
I have confirmed that the code above works properly on a GameObject that is a Unity 2D sprite. Is there any way to use this code with a tilemap?
I am trying to build a photo-taking app in Unity and I have already written a script, but the TakePhoto coroutine doesn't seem to work when I press a button. I am using the Start and Update functions from this video and the TakePhoto coroutine from this Stack Overflow solution. Can someone please help me fix this?
This is my code:
```
using UnityEngine;
using System.Collections;
using System.IO;
using UnityEngine.UI;
using System.Collections.Generic;

public class GetCam : MonoBehaviour
{
    WebCamTexture webCam;
    string your_path = "C:\\Users\\Jay\\Desktop";
    public RawImage display;
    public AspectRatioFitter fit;

    public void Start()
    {
        webCam = new WebCamTexture();
        webCam.Play();
        StartCoroutine(TakePhoto());
        display.texture = webCam;
    }

    public void lol() // starts the coroutine
    {
        StartCoroutine(TakePhoto());
    }

    public void Update()
    {
        float ratio = (float)webCam.width / (float)webCam.height;
        fit.aspectRatio = ratio;
        float ScaleY = webCam.videoVerticallyMirrored ? -1f : 1f;
        display.rectTransform.localScale = new Vector3(1f, ScaleY, 1f);
        int orient = -webCam.videoRotationAngle;
        display.rectTransform.localEulerAngles = new Vector3(0, 0, orient);
    }

    IEnumerator TakePhoto() // Start this Coroutine on some button click
    {
        // NOTE - you almost certainly have to do this here:
        yield return new WaitForEndOfFrame();
        // it's a rare case where the Unity doco is pretty clear,
        // http://docs.unity3d.com/ScriptReference/WaitForEndOfFrame.html
        // be sure to scroll down to the SECOND long example on that doco page
        Texture2D photo = new Texture2D(webCam.width, webCam.height);
        photo.SetPixels(webCam.GetPixels());
        photo.Apply();
        // Encode to a PNG
        byte[] bytes = photo.EncodeToPNG();
        // Write out the PNG. Of course you have to substitute your_path for something sensible
        File.WriteAllBytes(your_path + "photo.png", bytes);
        //lol
    }
}
```
Hi, change your Start function to something like this:
```
public void Start()
{
    if (WebCamTexture.devices.Length == 0)
    {
        Debug.LogError("Cannot find any camera!");
        return;
    }
    int index = -1;
    for (int i = 0; i < WebCamTexture.devices.Length; i++)
    {
        if (WebCamTexture.devices[i].name.ToLower().Contains("your webcam name"))
        {
            Debug.Log("WebCam Name: " + WebCamTexture.devices[i].name + " WebCam Index: " + i);
            index = i;
        }
    }
    if (index == -1)
    {
        Debug.LogError("Cannot find your camera name!");
        return;
    }
    WebCamDevice device = WebCamTexture.devices[index];
    webCam = new WebCamTexture(device.name);
    webCam.Play();
    StartCoroutine(TakePhoto());
    display.texture = webCam;
}
```
And on line 87:
```
File.WriteAllBytes(your_path + "photo.png", bytes);
```
change it to:
```
File.WriteAllBytes(your_path + "\\photo.png", bytes);
```
and it will work. Good luck.
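As a side note, a sketch using Path.Combine from System.IO (my suggestion, not part of the fix above) sidesteps the missing-separator bug entirely:
```
// Path.Combine inserts the directory separator for you, so the
// folder and file name can never fuse into "...Desktopphoto.png".
string fullPath = Path.Combine(your_path, "photo.png");
File.WriteAllBytes(fullPath, bytes);
```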
Complete code:
```
using UnityEngine;
using System.Collections;
using System.IO;
using UnityEngine.UI;
using System.Collections.Generic;

public class GetCam : MonoBehaviour
{
    WebCamTexture webCam;
    string your_path = "C:\\Users\\Jay\\Desktop";
    public RawImage display;
    public AspectRatioFitter fit;

    public void Start()
    {
        if (WebCamTexture.devices.Length == 0)
        {
            Debug.LogError("Cannot find any camera!");
            return;
        }
        int index = -1;
        for (int i = 0; i < WebCamTexture.devices.Length; i++)
        {
            if (WebCamTexture.devices[i].name.ToLower().Contains("your webcam name"))
            {
                Debug.Log("WebCam Name: " + WebCamTexture.devices[i].name + " WebCam Index: " + i);
                index = i;
            }
        }
        if (index == -1)
        {
            Debug.LogError("Cannot find your camera name!");
            return;
        }
        WebCamDevice device = WebCamTexture.devices[index];
        webCam = new WebCamTexture(device.name);
        webCam.Play();
        StartCoroutine(TakePhoto());
        display.texture = webCam;
    }

    public void Update()
    {
        float ratio = (float)webCam.width / (float)webCam.height;
        fit.aspectRatio = ratio;
        float ScaleY = webCam.videoVerticallyMirrored ? -1f : 1f;
        display.rectTransform.localScale = new Vector3(1f, ScaleY, 1f);
        int orient = -webCam.videoRotationAngle;
        display.rectTransform.localEulerAngles = new Vector3(0, 0, orient);
    }

    public void callTakePhoto()
    {
        StartCoroutine(TakePhoto());
    }

    IEnumerator TakePhoto() // Start this Coroutine on some button click
    {
        // NOTE - you almost certainly have to do this here:
        yield return new WaitForEndOfFrame();
        // it's a rare case where the Unity doco is pretty clear,
        // http://docs.unity3d.com/ScriptReference/WaitForEndOfFrame.html
        // be sure to scroll down to the SECOND long example on that doco page
        Texture2D photo = new Texture2D(webCam.width, webCam.height);
        photo.SetPixels(webCam.GetPixels());
        photo.Apply();
        // Encode to a PNG
        byte[] bytes = photo.EncodeToPNG();
        // Write out the PNG. Of course you have to substitute your_path for something sensible
        File.WriteAllBytes(your_path + "\\photo.png", bytes);
        //lol
    }
}
```
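One extra safeguard worth adding (my suggestion, not part of the original answer): WebCamTexture.Play is asynchronous, so the first real frame may not have arrived yet when the coroutine runs, and the texture can still report placeholder dimensions. A minimal sketch that waits for an actual frame before reading pixels:
```
IEnumerator TakePhoto()
{
    // Wait until the camera has actually delivered a frame; until then
    // width/height may still be the 16x16 placeholder values.
    while (!webCam.didUpdateThisFrame)
        yield return new WaitForEndOfFrame();

    Texture2D photo = new Texture2D(webCam.width, webCam.height);
    photo.SetPixels(webCam.GetPixels());
    photo.Apply();
    File.WriteAllBytes(your_path + "\\photo.png", photo.EncodeToPNG());
}
```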
I'm receiving the error `SetPixels32 called with invalid number of pixels in the array` from `UnityEngine.Texture2D:SetPixels32(Color32[])` on a line of code that retrieves a pixel array.
In some instances I receive the error, but in others the webcam streams just fine. I'm not sure why this is occurring. This is the line of code that is giving me problems:
```
streamTexture.SetPixels32(webcamTexture.GetPixels32(pixels));
```
That isn't much to go on, so the full script is below. Can anyone tell me why this error occurs, given that the streaming texture is set to the dimensions of the webcam texture? Any help is much appreciated!
```
using System.Collections;
using UnityEngine;
using Photon.Pun;
using UnityEngine.UI;

public class WebcamStream : MonoBehaviourPun, IPunObservable
{
    [SerializeField] private LocalPlayerSettings playerSettings;
    [SerializeField] private AvatarHandler parent;
    [SerializeField] private RawImage streamRawimage;
    private WebCamTexture webcamTexture;
    private Texture2D streamTexture;
    private Color32[] pixels;
    private byte[] data;

    private void OnEnable()
    {
        if (!parent.photonView.IsMine)
            return;
        InitWebcam();
    }

    private void OnDisable()
    {
        if (!parent.photonView.IsMine)
            return;
        webcamTexture.Stop();
    }

    private void InitWebcam()
    {
        // cast dimensions of target UI as ints for new webcam texture
        int width = (int)streamRawimage.rectTransform.rect.width;
        int height = (int)streamRawimage.rectTransform.rect.height;

        // set new dimensions and target device
        webcamTexture = new WebCamTexture(width, height)
        {
            deviceName = playerSettings.Webcam
        };

        // display webcam texture on the raw image and start camera
        streamRawimage.material.mainTexture = webcamTexture;
        webcamTexture.Play();

        // set pixels and stream texture to match webcamTexture
        pixels = new Color32[webcamTexture.width * webcamTexture.height];
        streamTexture = new Texture2D(webcamTexture.width, webcamTexture.height, TextureFormat.RGB24, false);

        // Begin Streaming Webcam Data
        StartCoroutine(StreamWebcam());
    }

    private IEnumerator StreamWebcam()
    {
        while (webcamTexture.deviceName == playerSettings.Webcam)
        {
            if (webcamTexture.isPlaying)
            {
                // set the target texture pixels to the webcam texture pixels and apply get/set
                streamTexture.SetPixels32(webcamTexture.GetPixels32(pixels));
                streamTexture.Apply();

                // convert image to byte array
                data = streamTexture.EncodeToJPG();
            }
            yield return null;
        }
        webcamTexture.Stop();
        if (WebCamTexture.devices.Length > 0)
        {
            InitWebcam();
        }
    }

    public void OnPhotonSerializeView(PhotonStream stream, PhotonMessageInfo info)
    {
        if (stream.IsWriting)
        {
            // send the byte array through the stream
            stream.SendNext(data);
        }
        else
        {
            // convert object received into byte array via cast
            data = (byte[])stream.ReceiveNext();

            // create new texture to load received data into
            streamTexture = new Texture2D(1, 1, TextureFormat.RGB24, false);
            streamTexture.LoadImage(data);

            // set webcam raw image texture to the newly updated texture
            streamRawimage.texture = streamTexture;
        }
    }
}
```
Solved! There was another script responsible for enabling this script's GameObject, and it needed to yield for about one second.
Also, a quick edit of `streamRawimage.material.mainTexture = webcamTexture;` to `streamRawimage.texture = webcamTexture;` fixed another issue surrounding a missing texture.
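For anyone hitting the same thing, here is a minimal sketch of what that enabling script ended up doing; the class and field names are hypothetical, and it assumes one second is enough for the webcam device to start:
```
using System.Collections;
using UnityEngine;

// Hypothetical activator: delays enabling the object that carries
// WebcamStream so the webcam device has time to spin up first.
public class WebcamStreamActivator : MonoBehaviour
{
    [SerializeField] private GameObject webcamStreamObject;

    private IEnumerator Start()
    {
        yield return new WaitForSeconds(1f);
        webcamStreamObject.SetActive(true); // OnEnable -> InitWebcam now sees real dimensions
    }
}
```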
I'm trying to draw to a texture with the mouse. Mouse coordinates are given to a shader which outputs to a render texture (I've tried both a regular RenderTexture and a CustomRenderTexture affected directly by a material), and it doesn't seem to work.
I can tell from the material that the mouse input is received, but nothing is visible on the render texture.
I'm starting to suspect that render textures aren't fully working in HDRP?
I'm hoping someone can point me in the direction of the real issue.
I'm on Unity 2019.3.0f3, and the shader is an HDRP Unlit Graph.
This is my script:
```
using System.Collections;
using System.Collections.Generic;
using UnityEngine;

public class DrawWithMouse : MonoBehaviour
{
    public Camera _camera;
    public CustomRenderTexture _splatmap;
    public Material _drawMaterial;
    private RaycastHit _hit;

    [Range(1, 100)]
    public float _brushSize = 1f;
    [Range(0, 10)]
    public float _brushStrength = 1f;

    private readonly float m_GUIsize = 256;
    private readonly int m_RenderTexSize = 1024;

    void Start()
    {
        _splatmap = new CustomRenderTexture(m_RenderTexSize, m_RenderTexSize, RenderTextureFormat.ARGBFloat, RenderTextureReadWrite.Linear)
        {
            name = "splatmap_CRT_generated",
            initializationColor = Color.black,
            initializationSource = CustomRenderTextureInitializationSource.Material,
            initializationMaterial = _drawMaterial,
            material = _drawMaterial,
            doubleBuffered = true,
            updateMode = CustomRenderTextureUpdateMode.OnDemand
        };
        _drawMaterial.SetVector("_DrawColor", Color.red);
        _drawMaterial.SetTexture("_SplatMap", _splatmap);
    }

    void Update()
    {
        if (Input.GetKey(KeyCode.Mouse0))
        {
            if (Physics.Raycast(_camera.ScreenPointToRay(Input.mousePosition), out _hit, 100f))
            {
                _drawMaterial.SetVector("_InputPoint", new Vector4(_hit.textureCoord.x, _hit.textureCoord.y, 0, 0));
                _drawMaterial.SetFloat("_BrushStrength", _brushStrength);
                _drawMaterial.SetFloat("_BrushSize", _brushSize);
            }
        }
    }

    private void OnGUI()
    {
        GUI.DrawTexture(new Rect(0, 0, m_GUIsize, m_GUIsize), _splatmap, ScaleMode.StretchToFill, false, 1);
    }
}
```
CustomRenderTextures and URP do not seem to play nicely together either, at least not in builds, as of 2020.1.2f1. I eventually had to give up and hack a solution together with a dedicated camera, layer and quad, because nothing I tried (and I tried a lot) would persuade the CustomRenderTexture to even initialise in a PC build, despite working perfectly in the editor.
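For reference, the workaround has roughly this shape (a sketch with made-up names, assuming the quad carries the draw material on its own layer and a dedicated camera renders only that layer):
```
using UnityEngine;

// Sketch: replace the CustomRenderTexture with a dedicated camera
// that renders a single material-driven quad into a plain RenderTexture.
public class QuadSplatmap : MonoBehaviour
{
    public Camera splatCamera;   // culling mask set to the quad's layer only
    public Renderer splatQuad;   // quad on that layer, using the draw material
    public RenderTexture splatmap;

    void Start()
    {
        splatmap = new RenderTexture(1024, 1024, 0, RenderTextureFormat.ARGBFloat);
        splatCamera.targetTexture = splatmap;
        // Every frame the camera bakes whatever the draw material outputs
        // on the quad into 'splatmap', which other materials can then sample.
    }
}
```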
I made a small 2D game using Unity3D (5.0.2f1). I dragged in some textures to use as the background of a scene. It worked well in the Unity Editor, but not when I built and ran it at a different window size: it didn't look like what I had seen in the Editor.
What should I do to make the texture resize automatically when the window size changes? Here is a screenshot of how the image looks in the editor and in the build, as well as my build settings:
Maybe these will help!
```
// file ExtensionsHandy.cs
using UnityEngine;
using UnityEngine.UI;
using System.Collections;

public static class ExtensionsHandy
{
    public static float OrthoScreenWidth(this Camera c)
    {
        return c.ViewportToWorldPoint(new Vector3(1, 1, 10)).x
             - c.ViewportToWorldPoint(new Vector3(0, 1, 10)).x;
    }

    public static float OrthoScreenHeight(this Camera c)
    {
        return c.ViewportToWorldPoint(new Vector3(0, 1, 10)).y
             - c.ViewportToWorldPoint(new Vector3(0, 0, 10)).y;
    }
}
```
Try it like this:
```
void Start()
{
    float w = Camera.main.OrthoScreenWidth();
    Debug.Log("w is " + w.ToString("f4"));
}
```
Note: if you're not familiar with extension methods, it's worth finding a quick tutorial.
Here are more very handy extensions for you. These will actually move an object to the screen edges!
For example, after
```
gameObject.ScreenRight();
gameObject.ScreenTop();
```
the object is now at the top-right of the screen!
And consider this:
```
void Update()
{
    gameObject.ScreenRight();
}
```
In that example, even if the user changes the orientation of the device, or resizes the window (Mac or Windows), the object is always on the right of the screen!
```
public static Vector3 WP(this Camera c, float x, float y, float z)
{
    return c.ViewportToWorldPoint(new Vector3(x, y, z));
}

public static void ScreenLeft(this GameObject moveme)
{
    Vector3 rr = moveme.transform.position;
    Vector3 leftTop = Camera.main.WP(0f, 1f, 0f);
    float leftX = leftTop.x;
    rr.x = leftX;
    moveme.transform.position = rr;
}

public static void ScreenRight(this GameObject moveme)
{
    Vector3 rr = moveme.transform.position;
    Vector3 rightTop = Camera.main.WP(1f, 1f, 0f);
    float rightX = rightTop.x;
    rr.x = rightX;
    moveme.transform.position = rr;
}

public static void ScreenBottom(this GameObject moveme)
{
    Vector3 rr = moveme.transform.position;
    Vector3 bottomLeft = Camera.main.WP(0f, 0f, 0f);
    float bottomY = bottomLeft.y;
    rr.y = bottomY;
    moveme.transform.position = rr;
}

public static void ScreenTop(this GameObject moveme)
{
    Vector3 rr = moveme.transform.position;
    Vector3 topLeft = Camera.main.WP(0f, 1f, 0f);
    float topY = topLeft.y;
    rr.y = topY;
    moveme.transform.position = rr;
}
```
Hope it helps.