Stop static mesh from moving with camera - unreal-engine4

I am trying to move the camera in the character class viewport. Whenever I hit 'play', the static mesh repositions itself almost as if it's parented to the camera. If I move the camera along the Z axis, the mesh doesn't move in the viewport, but when I hit play it snaps back to the relative position it had underneath the camera before I moved the camera component.
In other words, I am trying to unlink the movement between the two components.
I'm not sure how to disable this.
Here is my code for the character class.
// Copyright 1998-2018 Epic Games, Inc. All Rights Reserved.
#include "ZombieShooterCharacter.h"
#include "ZombieShooterProjectile.h"
#include "Animation/AnimInstance.h"
#include "Camera/CameraComponent.h"
#include "Components/CapsuleComponent.h"
#include "Components/InputComponent.h"
#include "GameFramework/InputSettings.h"
#include "HeadMountedDisplayFunctionLibrary.h"
#include "Kismet/GameplayStatics.h"
#include "MotionControllerComponent.h"
#include "XRMotionControllerBase.h" // for FXRMotionControllerBase::RightHandSourceId
DEFINE_LOG_CATEGORY_STATIC(LogFPChar, Warning, All);
//////////////////////////////////////////////////////////////////////////
// AZombieShooterCharacter
AZombieShooterCharacter::AZombieShooterCharacter()
{
// Set size for collision capsule
GetCapsuleComponent()->InitCapsuleSize(55.f, 96.0f);
// set our turn rates for input
BaseTurnRate = 45.f;
BaseLookUpRate = 45.f;
// Create a CameraComponent
FirstPersonCameraComponent = CreateDefaultSubobject<UCameraComponent>(TEXT("FirstPersonCamera"));
FirstPersonCameraComponent->SetupAttachment(GetCapsuleComponent());
FirstPersonCameraComponent->RelativeLocation = FVector(-39.56f, 1.75f, 64.f); // Position the camera
FirstPersonCameraComponent->bUsePawnControlRotation = true;
// Create a mesh component that will be used when being viewed from a '1st person' view (when controlling this pawn)
Mesh1P = CreateDefaultSubobject<USkeletalMeshComponent>(TEXT("CharacterMesh1P"));
Mesh1P->SetOnlyOwnerSee(true);
Mesh1P->SetupAttachment(FirstPersonCameraComponent);
Mesh1P->SetupAttachment(GetCapsuleComponent());
Mesh1P->bCastDynamicShadow = false;
Mesh1P->CastShadow = false;
Mesh1P->RelativeRotation = FRotator(1.9f, -19.19f, 5.2f);
Mesh1P->RelativeLocation = FVector(-0.5f, -4.4f, -155.7f);
// Create a gun mesh component
FP_Gun = CreateDefaultSubobject<USkeletalMeshComponent>(TEXT("FP_Gun"));
FP_Gun->SetOnlyOwnerSee(true); // only the owning player will see this mesh
FP_Gun->bCastDynamicShadow = false;
FP_Gun->CastShadow = false;
// FP_Gun->SetupAttachment(Mesh1P, TEXT("GripPoint"));
FP_Gun->SetupAttachment(RootComponent);
FP_MuzzleLocation = CreateDefaultSubobject<USceneComponent>(TEXT("MuzzleLocation"));
FP_MuzzleLocation->SetupAttachment(FP_Gun);
FP_MuzzleLocation->SetRelativeLocation(FVector(0.2f, 48.4f, -10.6f));
// Default offset from the character location for projectiles to spawn
GunOffset = FVector(100.0f, 0.0f, 10.0f);
// Note: The ProjectileClass and the skeletal mesh/anim blueprints for Mesh1P, FP_Gun, and VR_Gun
// are set in the derived blueprint asset named MyCharacter to avoid direct content references in C++.
}
void AZombieShooterCharacter::BeginPlay()
{
// Call the base class
Super::BeginPlay();
//Attach gun mesh component to Skeleton, doing it here because the skeleton is not yet created in the constructor
FP_Gun->AttachToComponent(Mesh1P, FAttachmentTransformRules(EAttachmentRule::SnapToTarget, true), TEXT("GripPoint"));
}
//////////////////////////////////////////////////////////////////////////
// Input
void AZombieShooterCharacter::SetupPlayerInputComponent(class UInputComponent* PlayerInputComponent)
{
// set up gameplay key bindings
check(PlayerInputComponent);
// Bind jump events
PlayerInputComponent->BindAction("Jump", IE_Pressed, this, &ACharacter::Jump);
PlayerInputComponent->BindAction("Jump", IE_Released, this, &ACharacter::StopJumping);
// Bind fire event
PlayerInputComponent->BindAction("Fire", IE_Pressed, this, &AZombieShooterCharacter::OnFire);
// Bind movement events
PlayerInputComponent->BindAxis("MoveForward", this, &AZombieShooterCharacter::MoveForward);
PlayerInputComponent->BindAxis("MoveRight", this, &AZombieShooterCharacter::MoveRight);
// We have 2 versions of the rotation bindings to handle different kinds of devices differently
// "turn" handles devices that provide an absolute delta, such as a mouse.
// "turnrate" is for devices that we choose to treat as a rate of change, such as an analog joystick
PlayerInputComponent->BindAxis("Turn", this, &APawn::AddControllerYawInput);
PlayerInputComponent->BindAxis("TurnRate", this, &AZombieShooterCharacter::TurnAtRate);
PlayerInputComponent->BindAxis("LookUp", this, &APawn::AddControllerPitchInput);
PlayerInputComponent->BindAxis("LookUpRate", this, &AZombieShooterCharacter::LookUpAtRate);
}
void AZombieShooterCharacter::OnFire()
{
// try and fire a projectile
if (ProjectileClass != NULL)
{
UWorld* const World = GetWorld();
if (World != NULL)
{
const FRotator SpawnRotation = GetControlRotation();
// MuzzleOffset is in camera space, so transform it to world space before offsetting from the character location to find the final muzzle position
const FVector SpawnLocation = ((FP_MuzzleLocation != nullptr) ? FP_MuzzleLocation->GetComponentLocation() : GetActorLocation()) + SpawnRotation.RotateVector(GunOffset);
//Set Spawn Collision Handling Override
FActorSpawnParameters ActorSpawnParams;
ActorSpawnParams.SpawnCollisionHandlingOverride = ESpawnActorCollisionHandlingMethod::AdjustIfPossibleButDontSpawnIfColliding;
// spawn the projectile at the muzzle
World->SpawnActor<AZombieShooterProjectile>(ProjectileClass, SpawnLocation, SpawnRotation, ActorSpawnParams);
}
}
// try and play the sound if specified
if (FireSound != NULL)
{
UGameplayStatics::PlaySoundAtLocation(this, FireSound, GetActorLocation());
}
// try and play a firing animation if specified
if (FireAnimation != NULL)
{
// Get the animation object for the arms mesh
UAnimInstance* AnimInstance = Mesh1P->GetAnimInstance();
if (AnimInstance != NULL)
{
AnimInstance->Montage_Play(FireAnimation, 1.f);
}
}
}
//Commenting this section out to be consistent with FPS BP template.
//This allows the user to turn without using the right virtual joystick
//void AZombieShooterCharacter::TouchUpdate(const ETouchIndex::Type FingerIndex, const FVector Location)
//{
// if ((TouchItem.bIsPressed == true) && (TouchItem.FingerIndex == FingerIndex))
// {
// if (TouchItem.bIsPressed)
// {
// if (GetWorld() != nullptr)
// {
// UGameViewportClient* ViewportClient = GetWorld()->GetGameViewport();
// if (ViewportClient != nullptr)
// {
// FVector MoveDelta = Location - TouchItem.Location;
// FVector2D ScreenSize;
// ViewportClient->GetViewportSize(ScreenSize);
// FVector2D ScaledDelta = FVector2D(MoveDelta.X, MoveDelta.Y) / ScreenSize;
// if (FMath::Abs(ScaledDelta.X) >= 4.0 / ScreenSize.X)
// {
// TouchItem.bMoved = true;
// float Value = ScaledDelta.X * BaseTurnRate;
// AddControllerYawInput(Value);
// }
// if (FMath::Abs(ScaledDelta.Y) >= 4.0 / ScreenSize.Y)
// {
// TouchItem.bMoved = true;
// float Value = ScaledDelta.Y * BaseTurnRate;
// AddControllerPitchInput(Value);
// }
// TouchItem.Location = Location;
// }
// TouchItem.Location = Location;
// }
// }
// }
//}
void AZombieShooterCharacter::MoveForward(float Value)
{
if (Value != 0.0f)
{
// add movement in that direction
AddMovementInput(GetActorForwardVector(), Value);
}
}
void AZombieShooterCharacter::MoveRight(float Value)
{
if (Value != 0.0f)
{
// add movement in that direction
AddMovementInput(GetActorRightVector(), Value);
}
}
void AZombieShooterCharacter::TurnAtRate(float Rate)
{
// calculate delta for this frame from the rate information
AddControllerYawInput(Rate * BaseTurnRate * GetWorld()->GetDeltaSeconds());
}
void AZombieShooterCharacter::LookUpAtRate(float Rate)
{
// calculate delta for this frame from the rate information
AddControllerPitchInput(Rate * BaseLookUpRate * GetWorld()->GetDeltaSeconds());
}
Thanks!

You're attaching the mesh to the camera. If you don't want the two parented, remove this line:
Mesh1P->SetupAttachment(FirstPersonCameraComponent);
You should call SetupAttachment only once per component, so keep only the attachment to the capsule on the following line.
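For reference, here are the relevant constructor lines from your code with the camera attachment removed:
// Attach the first-person mesh to the capsule only, so it no longer follows the camera
Mesh1P = CreateDefaultSubobject<USkeletalMeshComponent>(TEXT("CharacterMesh1P"));
Mesh1P->SetOnlyOwnerSee(true);
Mesh1P->SetupAttachment(GetCapsuleComponent()); // the single SetupAttachment call
Mesh1P->bCastDynamicShadow = false;
Mesh1P->CastShadow = false;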

Related

Texture will not load to player in SFML

I am trying to load a texture to my player class. I made the function setText1 to do this, but when I call it in the main loop (and I made sure it's not inside the poll-event loop) it just shows a white rectangle. I changed the image to 32 bits, which didn't work, and then to 8, which also didn't work (its original depth was 24). I also checked whether the texture was failing to load, and it is not. All responses are appreciated.
void setText1() {
sf::Texture p1text;
p1text.loadFromFile("player1.png");
rect.setTexture(&p1text);
}
main.cpp
sf::RenderWindow window(sf::VideoMode(800,800), "High noon showdown", sf::Style::Close | sf::Style::Titlebar);
window.setFramerateLimit(60);
player1 player1(50, 70, 85, 325);
player2 player2(50, 70, 715, 325);
player1.setText1();
//game loop
//movement
window.clear(sf::Color(210,180,140,100));
player1.collidewindow1();
player1.drawto1(window);
player2.collidewindow2();
player2.drawto2(window);
window.display();
player1.hpp
#pragma once
#include <SFML/Graphics.hpp>
#include <stdlib.h>
#include <stdio.h>
#include <SFML/Window.hpp>
#include "main.h"
class player1
{
public:
//get player size and position
player1(float x, float y, float px, float py) {
rect.setSize(sf::Vector2f(x,y));
rect.setPosition(sf::Vector2f(px, py));
}
//draw player
void drawto1(sf::RenderWindow &window) {
window.draw(rect);
}
void collidewindow1() {
//Top collision
if(rect.getPosition().y < 0.0f) {
rect.setPosition(rect.getPosition().x, 0.0f);
}
//Bottom collision
if(rect.getPosition().y + rect.getGlobalBounds().height > 800) {
rect.setPosition(rect.getPosition().x, 800 - rect.getGlobalBounds().height );
}
}
void upfalse1() {
up = false;
}
void downfalse1() {
down = false;
}
void moveup1() {
up = true;
if(up == true){
rect.move(0.0f, -5.0f * dt * 32.5f);
}
}
void movedown1() {
down = true;
if(down == true) {
rect.move(0.0f, 5.0f * dt * 32.5f);
}
}
void setText1() {
sf::Texture p1text;
p1text.loadFromFile("p1.png");
rect.setTexture(&p1text);
}
int getx1() {
return rect.getPosition().x;
}
int gety1() {
return rect.getPosition().y;
}
private:
sf::RectangleShape rect;
bool up;
bool down;
};
sf::Sprite needs a texture to work. The sprite stores the texture as a pointer, so using a temporary variable will not work. Your texture must either be dynamically allocated or live somewhere that keeps it alive until the sprite you created is destroyed.
If you don't want to play with dynamic allocation, I suggest you put the texture variable into the 'player' object.
This is what's breaking your code:
void setText1() {
sf::Texture p1text; <- you create the texture
p1text.loadFromFile("player1.png"); <- load the texture
rect.setTexture(&p1text); <- set the rect/sprite with the texture
} <- texture gets deleted, so the sprite/rect points to an address with no texture
Fix:
class Player1
{
...
private:
sf::RectangleShape rect;
bool up;
bool down;
sf::Texture texture; <- void setText1() should set this texture
};
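With that member in place, setText1 could look like this (a minimal sketch; the file name is taken from your snippet, and checking the return value of loadFromFile is a good idea):
void setText1() {
    // texture is a class member now, so it stays alive as long as the player object does
    if (!texture.loadFromFile("player1.png")) {
        // loading failed - handle it here (e.g. log an error and keep the plain rectangle)
        return;
    }
    rect.setTexture(&texture);
}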

Poor screen share using Agora Virtual Camera Prefab from oculus

I'm currently working on a Unity project where I'm trying to screen-share content from an Oculus Quest 2 to a remote PC, with audio chat enabled, using the Agora Virtual Camera Prefab package. I followed this blog for the implementation: https://www.agora.io/en/blog/how-to-build-a-vr-video-chat-app-using-unitys-xr-framework/. I implemented it and connected with a remote PC, but the screen share from the Oculus is very poor on the remote PC (most of the time a black screen). The audio is better, with only a lag of 1 or 2 seconds, and I'm also able to see the remote PC user's camera video inside my game through the Oculus. Any help would be much appreciated; I'm totally stuck. I'm also attaching a screenshot of the Agora Virtual Camera Prefab's config and the code used, in case anyone has suggestions.
using System.Collections;
using agora_gaming_rtc;
using UnityEngine;
using UnityEngine.UI;
using static agora_gaming_rtc.ExternalVideoFrame;
using agora_utilities;
using System.Collections.Generic;
#if (UNITY_2018_3_OR_NEWER && UNITY_ANDROID)
using UnityEngine.Android;
#endif
public class AgoraVirtualCamera : MonoBehaviour
{
// Use this for initialization
#if (UNITY_2018_3_OR_NEWER && UNITY_ANDROID)
private ArrayList permissionList = new ArrayList();
#endif
// PLEASE KEEP THIS App ID IN SAFE PLACE
// Get your own App ID at https://dashboard.agora.io/
[Header("Agora Config")]
[SerializeField]
private string AppID = "";
[SerializeField]
private string TempToken = "";
[SerializeField]
private string TokenServerURL = "";
[SerializeField]
private string ChannelName = "";
[Header("Env Config")]
[SerializeField]
private Camera VirtualCam;
[SerializeField]
private GameObject RemoteVideoRoot;
[SerializeField]
private GameObject RemoteScreenVideoRoot;
/*[SerializeField]
private int ScreenShareUID;*/
[SerializeField]
private Text LogText;
[Header("UI Btn Config")]
public GameObject JoinBtn;
public GameObject LeaveBtn;
public GameObject MicBtn;
public GameObject QuitBtn;
public Color ActiveMicColor = Color.green;
public Color DisabledMicColor = Color.red;
[Header("Video Encoder Config")]
[SerializeField]
private VideoDimensions dimensions = new VideoDimensions
{
width = 1280,
height = 720
};
[SerializeField]
private int bitrate = 1130;
[SerializeField]
private FRAME_RATE frameRate = FRAME_RATE.FRAME_RATE_FPS_30;
[SerializeField]
private VIDEO_MIRROR_MODE_TYPE mirrorMode = VIDEO_MIRROR_MODE_TYPE.VIDEO_MIRROR_MODE_DISABLED;
// use bitrate: 2260 for broadcast mode
// Pixel format
public static TextureFormat ConvertFormat = TextureFormat.RGBA32;
public static VIDEO_PIXEL_FORMAT PixelFormat = VIDEO_PIXEL_FORMAT.VIDEO_PIXEL_RGBA;
private static int ShareCameraMode = 1; // 0 = unsafe buffer pointer, 1 = renderer image
// used for setting frame order
int timeStampCount = 0; // monotonic timestamp counter
// perspective camera buffer
private Texture2D BufferTexture;
// output log
private Logger logger;
// uid
private uint UID = 0; // 0 tells the agora engine to generate the uid
// reference to the active agora client
static AgoraInterface client = null;
// keep track of remote UID
Dictionary<string, List<uint>> RemoteUIDs = new Dictionary<string, List<uint>>();
// keep track of channel state
bool InChannel = false;
#region --- Life Cycles ---
void Awake()
{
// keep this alive across scenes
//DontDestroyOnLoad(this.gameObject);
}
// Start is called before the first frame update
void Start()
{
CheckAppId();// ensure an AppID is defined
// if there isn't a join button defined, autojoin
if (JoinBtn == null || !JoinBtn.activeInHierarchy)
{
JoinChannel();
}
}
// Update is called once per frame
void Update()
{
PermissionHelper.RequestMicrophontPermission();
PermissionHelper.RequestCameraPermission();
}
void OnDisable()
{
LeaveChannel();
}
void OnApplicationPause(bool paused)
{
if (client != null)
{
client.EnableVideo(paused);
client.EnableAudio(paused);
}
}
void OnApplicationQuit()
{
ShareCameraMode = 0;
if (client != null)
{
client.Leave();
client.UnloadEngine();
}
}
#endregion
#region --- Agora Functions ---
void ReloadAgoraEngine()
{
client = GetComponent<AgoraInterface>();
if (client != null)
{
client.Leave();
client.UnloadEngine();
Destroy(client);
client = null;
}
client = gameObject.AddComponent<AgoraInterface>();
client.SetLogger(logger);
// video config
VideoEncoderConfiguration videoEncodeConfig = new VideoEncoderConfiguration
{
dimensions = this.dimensions,
frameRate = this.frameRate,
bitrate = this.bitrate,
orientationMode = ORIENTATION_MODE.ORIENTATION_MODE_FIXED_LANDSCAPE,
mirrorMode = this.mirrorMode
};
client.SetVideoEncoderConfig(videoEncodeConfig);
}
// agora functions
public void JoinChannel()
{
// clean up and create a new one
ReloadAgoraEngine();
string appidMSG = string.Format("Initializing client with appid: ${0}", AppID);
logger.UpdateLog(appidMSG);
client.LoadEngine(AppID); // load engine
// Set up the texture for rendering POV as a texture
if (VirtualCam.isActiveAndEnabled)
{
logger.UpdateLog("Virtual Camera is Active and Enabled, Enable custom video source");
client.CustomVideo = true;
int width = Screen.width;
int height = Screen.height;
}
AddCallbackEvents(); // add custom event handling
if (TokenServerURL != "")
{
client.JoinWithTokenServer(ChannelName, UID, TokenServerURL);
}
else
{
// join with or without a token
client.Join(ChannelName, TempToken, UID);
string joiningChannelMsg = string.Format("Joining channel: {0}, with uid: {1}", ChannelName, UID);
logger.UpdateLog(joiningChannelMsg);
}
}
public void LeaveChannel()
{
if (client != null)
{
client.Leave();
}
DisableSharing();
InChannel = false;
// change mic button text and color - help the user see they left the channel
if (MicBtn != null)
{
MicBtn.GetComponentInChildren<Text>().text = "MIC";
MicBtn.GetComponent<Image>().color = Color.white;
}
// remove the remote video planes
if (gameObject.activeInHierarchy)
{
if (RemoteVideoRoot?.transform.childCount > 0)
{
foreach (Transform child in RemoteVideoRoot.transform)
{
GameObject.Destroy(child.gameObject);
}
StartCoroutine(UiUpdate(0.5f));
}
}
}
public void ToggleMic()
{
if (!InChannel)
return; // only toggle mic when in a channel
Text MicBtnText = MicBtn.GetComponentInChildren<Text>();
Image micBtnImg = MicBtn.GetComponent<Image>();
if (micBtnImg.color == Color.green)
{
client.MuteLocalAudioStream(true);
MicBtnText.text = "Mic OFF";
micBtnImg.color = DisabledMicColor;
}
else if (micBtnImg.color == Color.red)
{
client.MuteLocalAudioStream(false);
MicBtnText.text = "Mic ON";
micBtnImg.color = ActiveMicColor;
}
else
{
client.MuteLocalAudioStream(true); // mute by default
MicBtnText.text = "- MUTED -";
MicBtnText.color = Color.white;
micBtnImg.color = DisabledMicColor;
}
}
// Called by quit button
public void ExitApp()
{
#if UNITY_EDITOR
// Application.Quit() does not work in the editor so
// UnityEditor.EditorApplication.isPlaying need to be set to false to end the game
UnityEditor.EditorApplication.isPlaying = false;
#else
Application.Quit();
#endif
}
#endregion
#region --- Callback handlers ---
protected virtual void AddCallbackEvents()
{
IRtcEngine mRtcEngine = IRtcEngine.QueryEngine();
mRtcEngine.OnJoinChannelSuccess += OnJoinChannelSuccess;
mRtcEngine.OnUserJoined += OnUserJoined;
mRtcEngine.OnUserOffline += OnUserOffline;
}
public void OnJoinChannelSuccess(string channelName, uint uid, int elapsed)
{
InChannel = true;
if (VirtualCam != null && VirtualCam.isActiveAndEnabled)
{
logger.UpdateLog("Enable Virtual Camera Sharing");
EnableVirtualCameraSharing();
}
else
{
logger.UpdateLog("ERROR: Failed to find perspective camera.");
}
// update mic button color and text - visually show joined channel
if (MicBtn != null)
{
MicBtn.GetComponentInChildren<Text>().text = "MIC ON";
MicBtn.GetComponent<Image>().color = ActiveMicColor;
}
// enable dual stream mode
IRtcEngine mRtcEngine = IRtcEngine.QueryEngine();
mRtcEngine.EnableDualStreamMode(true);
}
public void OnUserJoined(uint uid, int elapsed)
{
// add video streams from all users in the channel
// offset the new video plane based on the parent's number of children.
//float xOffset = RemoteVideoRoot.transform.childCount * 3.5f;
//MakeVideoView(uid, RemoteVideoRoot, new Vector3(xOffset, 0, 0), Quaternion.Euler(270, 0, 0));
// to restrict which user video streams appear
// only show users with uid 100-1009 or 49024 (screen share)",
// uid 49024 is an arbitrary number that was selected and hardcoded as uid for the screen share stream from the web demo code. This uid can be customized
string remoteUIDtype;
if (uid >= 1000 && uid <= 1009)
{
// offset the new video plane based on the parent's number of children.
float xOffset = RemoteVideoRoot.transform.childCount * -3.69f;
MakeVideoView(uid, RemoteVideoRoot, new Vector3(xOffset, 0, 0), Quaternion.Euler(270, 180, 0), new Vector3(1.0f, 1.0f, 0.5625f));
remoteUIDtype = "admin";
} else if (uid == 49024 && RemoteScreenVideoRoot != null)
{
MakeVideoView(uid, RemoteScreenVideoRoot, new Vector3(0, 0, 0), Quaternion.Euler(270, 0, 0), new Vector3(-1.777f,-1.0f, -1.0f));
remoteUIDtype = "screen";
}
else
{
IRtcEngine mRtcEngine = IRtcEngine.QueryEngine();
// unsubscribe from video & audio streams
mRtcEngine.MuteRemoteVideoStream(uid, true);
mRtcEngine.MuteRemoteAudioStream(uid, true);
remoteUIDtype = "peer";
}
// keep track of the remote uids
logger.UpdateLog($"Make Remote Video UID type:{remoteUIDtype}");
if (RemoteUIDs.ContainsKey(remoteUIDtype))
{
RemoteUIDs[remoteUIDtype].Add(uid);
} else {
RemoteUIDs.Add(remoteUIDtype, new List<uint> { uid });
}
}
public void OnUserOffline(uint uid, USER_OFFLINE_REASON reason)
{
logger.UpdateLog("onUserOffline: update UI");
// update the position of the remaining children
StartCoroutine(UiUpdate(0.5f));
}
#endregion
#region --- misc helper functions ---
public void SetResolution(VideoDimensions newDimensions, int newBitrate)
{
dimensions = newDimensions;
bitrate = newBitrate;
VideoEncoderConfiguration videoEncodeConfig = new VideoEncoderConfiguration
{
dimensions = this.dimensions,
frameRate = this.frameRate,
bitrate = this.bitrate,
orientationMode = ORIENTATION_MODE.ORIENTATION_MODE_FIXED_LANDSCAPE,
mirrorMode = this.mirrorMode
};
client.SetVideoEncoderConfig(videoEncodeConfig);
}
private void CheckAppId()
{
logger = new Logger(LogText);
logger.DebugAssert(AppID.Length > 10, "Please fill in your AppId"); // Checks that AppID is set.
}
private void MakeVideoView(uint uid, GameObject parentNode, Vector3 position, Quaternion rotation, Vector3 scale)
{
logger.UpdateLog(string.Format("Make Remote Video View for UID: {0}.", uid));
GameObject go = GameObject.Find(uid.ToString());
if (go != null)
{
return; // reuse
}
// create a GameObject and assign to this new user
VideoSurface videoSurface = makePlaneSurface(uid.ToString(), parentNode, position, rotation, scale);
if (videoSurface != null)
{
// configure videoSurface
videoSurface.SetForUser(uid);
videoSurface.SetEnable(true);
videoSurface.SetVideoSurfaceType(AgoraVideoSurfaceType.Renderer);
videoSurface.SetGameFps(30);
}
}
// VIDEO TYPE 1: 3D Object
public VideoSurface makePlaneSurface(string goName, GameObject parentNode, Vector3 position, Quaternion rotation, Vector3 scale)
{
GameObject go = GameObject.CreatePrimitive(PrimitiveType.Plane);
if (go == null)
{
return null;
}
go.name = goName;
go.transform.localScale = scale; // scale the video (4:3)
if (parentNode != null)
{
go.transform.parent = parentNode.transform;
go.transform.localPosition = position;
go.transform.localRotation = rotation;
Debug.Log("add video view");
}
else
{
Debug.Log("parentNode is null video view");
go.transform.localPosition = new Vector3(0, 0, 0f);
go.transform.localRotation = Quaternion.Euler(270, 0, 0);
}
// configure videoSurface
VideoSurface videoSurface = go.AddComponent<VideoSurface>();
return videoSurface;
}
IEnumerator UiUpdate(float time)
{
yield return new WaitForSeconds(time);
// update the UI
for (int i = 0; i < RemoteVideoRoot.transform.childCount; i++)
{
float xOffset = -1 * i * 3.69f; // calculate the new position
RemoteVideoRoot.transform.GetChild(i).localPosition = new Vector3(xOffset, 0, 0); // update the position
}
}
#endregion
#region --- Virtual Camera video frame sharing ---
void EnableVirtualCameraSharing()
{
RenderTexture renderTexture = VirtualCam.targetTexture;
if (renderTexture != null)
{
BufferTexture = new Texture2D(renderTexture.width, renderTexture.height, ConvertFormat, false);
StartCoroutine(CoShareRenderData()); // use co-routine to push frames into the Agora stream
} else
{
logger.UpdateLog("Error: No Render Texture Found. Check Virtual Camera.");
}
}
void DisableSharing()
{
BufferTexture = null;
}
IEnumerator CoShareRenderData()
{
while (ShareCameraMode == 1)
{
yield return new WaitForEndOfFrame();
ShareRenderTexture();
}
yield return null;
}
private void ShareRenderTexture()
{
if (BufferTexture == null) // offlined
{
return;
}
Camera targetCamera = VirtualCam; // AR Camera
RenderTexture.active = targetCamera.targetTexture; // the targetTexture holds render texture
Rect rect = new Rect(0, 0, targetCamera.targetTexture.width, targetCamera.targetTexture.height);
BufferTexture.ReadPixels(rect, 0, 0);
BufferTexture.Apply();
byte[] bytes = BufferTexture.GetRawTextureData();
// sends the Raw data contained in bytes
//monoProxy.StartCoroutine(PushFrame(bytes, (int)rect.width, (int)rect.height,
//() =>
//{
// bytes = null;
//}));
StartCoroutine(PushFrame(bytes, (int)rect.width, (int)rect.height,
() =>
{
bytes = null;
}));
RenderTexture.active = null;
}
/// <summary>
/// Push frame to the remote client. This is the same code that does ScreenSharing.
/// </summary>
/// <param name="bytes">raw video image data</param>
/// <param name="width"></param>
/// <param name="height"></param>
/// <param name="onFinish">callback upon finish of the function</param>
/// <returns></returns>
IEnumerator PushFrame(byte[] bytes, int width, int height, System.Action onFinish)
{
if (bytes == null || bytes.Length == 0)
{
Debug.LogError("Zero bytes found!!!!");
yield break;
}
IRtcEngine rtc = IRtcEngine.QueryEngine();
//if the engine is present
if (rtc != null)
{
//Create a new external video frame
ExternalVideoFrame externalVideoFrame = new ExternalVideoFrame();
//Set the buffer type of the video frame
externalVideoFrame.type = ExternalVideoFrame.VIDEO_BUFFER_TYPE.VIDEO_BUFFER_RAW_DATA;
// Set the video pixel format
externalVideoFrame.format = PixelFormat; // VIDEO_PIXEL_RGBA
//apply raw data you are pulling from the rectangle you created earlier to the video frame
externalVideoFrame.buffer = bytes;
//Set the width of the video frame (in pixels)
externalVideoFrame.stride = width;
//Set the height of the video frame
externalVideoFrame.height = height;
//Remove pixels from the sides of the frame
externalVideoFrame.cropLeft = 10;
externalVideoFrame.cropTop = 10;
externalVideoFrame.cropRight = 10;
externalVideoFrame.cropBottom = 10;
//Rotate the video frame (0, 90, 180, or 270)
externalVideoFrame.rotation = 180;
// increment i with the video timestamp
//externalVideoFrame.timestamp = System.DateTime.Now.Ticks;
externalVideoFrame.timestamp = timeStampCount++;
//Push the external video frame with the frame we just created
int a = rtc.PushVideoFrame(externalVideoFrame); // keep the return code for the log below
if (timeStampCount % 100 == 0) Debug.Log(" pushVideoFrame(" + timeStampCount + ") size:" + bytes.Length + " => " + a);
}
yield return null;
onFinish();
}
#endregion
}

Stop outlining object when no longer looking at it?

I'm trying to make a pickup system and I thought it would be cool to draw an outline around the item when you're looking at it. The issue I'm facing, though, is that when you're no longer looking at the object I need to disable the outline. I ended up with an odd solution and would like some help improving it.
public class PlayerCamera : MonoBehaviour
{
public Transform playerBody;
public Transform cameraHolder;
public float sensitivity;
public float currentY;
void Update()
{
MoveCamera();
LookingAtObject();
}
Outline objectOutline;
void LookingAtObject()
{
if(Physics.Raycast(cameraHolder.transform.position, cameraHolder.transform.forward, out var hit, Mathf.Infinity))
{
var obj = hit.collider.gameObject;
var outline = obj.GetComponent<Outline>();
if (obj && outline)
{
objectOutline = hit.transform.GetComponent<Outline>();
if (objectOutline)
objectOutline.OutlineWidth = 7;
}
else if (objectOutline)
objectOutline.OutlineWidth = 0;
}
}
}
You can store the outlined object in a variable, and whenever you hit a different outline object or hit nothing, set the outline back to zero.
Outline objectOutline;
void LookingAtObject()
{
if (Physics.Raycast(...))
{
var outline = hit.collider.GetComponent<Outline>();
// Make sure the hit object is not the same one we already outlined
//
if (outline != objectOutline)
{
// Remove the outline from our previously viewed object
//
if (objectOutline != null)
{
objectOutline.OutlineWidth = 0;
}
// Store the new outline object
//
objectOutline = outline;
// Since outline could be null, we need to check null before outlining
//
if (objectOutline != null)
{
objectOutline.OutlineWidth = 7;
}
}
}
// If we have an object we outlined and we didn't hit anything,
// remove the outline and reset the variable
//
else if (objectOutline != null)
{
objectOutline.OutlineWidth = 0;
objectOutline = null;
}
}
You need to handle two events to solve the problem: when the ray first hits the object and when it stops hitting it. This code works out which event occurred by recording the previous frame's RaycastHit, comparing it to the current hit, and setting the outline accordingly.
private RaycastHit lastHit;
void Update()
{
var ray = Camera.main.ScreenPointToRay(Input.mousePosition);
Physics.Raycast(ray, out var hit);
//Physics.Raycast(cameraHolder.transform.position, cameraHolder.transform.forward, out var hit, Mathf.Infinity);
if (hit.transform != lastHit.transform)
{
if (hit.transform) // the raycast has started hitting a new object
{
var outline = hit.transform.GetComponent<Outline>();
if (outline) // the hit object may not have an Outline component
outline.OutlineWidth = 7;
}
else if (lastHit.transform) // the raycast no longer hits the previous object
{
var outline = lastHit.transform.GetComponent<Outline>();
if (outline)
outline.OutlineWidth = 0;
}
}
lastHit = hit;
}
Hint: I swapped in a mouse-position ray for testing and left your original raycast commented out; switch back to it if you want the previous behavior.

How to resize all particles from a particle system?

I'm trying to dynamically resize particles using a slider, as well as change their colour.
Particles are used to display datapoints in a 3D scatterplot. I'm using this code: https://github.com/PrinzEugn/Scatterplot_Standalone
private ParticleSystem.Particle[] particlePoints;
void Update () {
pointScale = sizeSlider.value;
for (int i = 0; i < pointList.Count; i++) {
Quaternion quaternion = Camera.current.transform.rotation;
Vector3 angles = quaternion.eulerAngles;
// Set point color
particlePoints[i].startColor = new Color(angles.x, angles.y, angles.z, 1.0f);
particlePoints[i].transform.localScale = new Vector3(pointScale, pointScale, pointScale);
}
}
The issue is that there's no transform property on Particles, and changing the "startColour" doesn't change anything.
The API states that "The current size of the particle is calculated procedurally based on this value and the active size modules."
What does that mean, and how can I change the size of the particles?
Thanks to previous answers I managed to get this working:
In the PlacePrefabPoints method I add every instantiated prefab to a List, and I add a listener to the slider, which looks like this:
void changedPointSize(){
pointScale = sizeSlider.value;
for (int i = 0; i < objects.Count; i++) {
objects[i].transform.localScale = new Vector3(pointScale, pointScale, pointScale);
}
}
Thanks all!
I just had a look at PointRenderer.cs; CreateParticles and PlacePrefabPoints give a good hint about what has to be changed.
So I guess you would simply change the scale values:
// ParticleSystem.Particle is a struct, so write through the array index (a foreach variable can't be modified)
for (int i = 0; i < particlePoints.Length; i++)
{
Quaternion quaternion = Camera.current.transform.rotation;
Vector3 angles = quaternion.eulerAngles;
// Set point color
particlePoints[i].startColor = new Color(angles.x, angles.y, angles.z, 1.0f);
particlePoints[i].startSize = sizeSlider.value;
}
and then re-call
GetComponent<ParticleSystem>().SetParticles(particlePoints, particlePoints.Length);
It is questionable, though, whether you really want to do this in Update. I would rather do it in sizeSlider.onValueChanged so it only runs when necessary (you could even require a certain threshold of change before updating the view). For the color there might be no other option than doing it in Update, but at least there I would use a threshold:
private ParticleSystem ps;
// I assume you have that referenced in the inspector
public Slider sizeSlider;
// flag to control whether system should be updated
private bool updateSystem;
private void Awake()
{
ps = GetComponent<ParticleSystem>();
}
private void OnEnable()
{
// add a listener to onValueChanged
// it is safe to remove the listener first (even if it was never added)
// this makes sure it is not added twice
sizeSlider.onValueChanged.RemoveListener(OnSliderChanged);
sizeSlider.onValueChanged.AddListener(OnSliderChanged);
}
private void OnDisable()
{
// cleanup listener
sizeSlider.onValueChanged.RemoveListener(OnSliderChanged);
}
private void OnSliderChanged(float value) // Slider.onValueChanged passes the new value
{
// ParticleSystem.Particle is a struct, so write through the array index
for (int i = 0; i < particlePoints.Length; i++)
{
particlePoints[i].startSize = sizeSlider.value;
}
// do the same also for the instantiated prefabs
foreach (Transform child in PointHolder.transform)
{
child.localScale = Vector3.one * sizeSlider.value;
}
updateSystem = true;
}
private Quaternion lastCameraRot;
public float CameraUpdateThreshold;
private void Update()
{
if(Quaternion.Angle(Camera.current.transform.rotation, lastCameraRot) > CameraUpdateThreshold)
{
for (int i = 0; i < particlePoints.Length; i++)
{
Quaternion quaternion = Camera.current.transform.rotation;
Vector3 angles = quaternion.eulerAngles;
// Set point color
particlePoints[i].startColor = new Color(angles.x, angles.y, angles.z, 1.0f);
}
lastCameraRot = Camera.current.transform.rotation;
updateSystem = true;
}
if(!updateSystem) return;
updateSystem = false;
ps.SetParticles(particlePoints, particlePoints.Length);
}

How to show a tracking line from where the model is placed in Augmented Reality?

I am looking to show a line in my app from where the model is placed, so that the user knows the position where the model is kept in the real world. When the user moves the device camera away from the model, the line turns on to show where the model is; similarly, it turns off when the model is detected again. I have attached images from a similar app, where white dotted lines show the path. Notice how the lines disappear when the model is detected.
LineRenderer lins;
public GameObject Lineprefab;
private GameObject newline;
public Transform startpoint;
public Renderer m_rend1;
bool HitTestWithResultType (ARPoint point, ARHitTestResultType resultTypes)
{
List<ARHitTestResult> hitResults = UnityARSessionNativeInterface.GetARSessionNativeInterface ().HitTest (point, resultTypes);
if (hitResults.Count > 0 && check==true)
{
foreach (var hitResult in hitResults)
{
Debug.Log ("Got hit!");
//obj.Hideplane();
Genplanes.SetActive(false);
if (Select == 0) {
Debug.Log("hit-zero!");
Instantiate(Instaobj[0], ForSelect);
check = false;
}
if (Select == 1) {
Debug.Log("hit-one!");
Instantiate(Instaobj[1], ForSelect);
check = false;
}
if (Select == 2) {
Debug.Log("hit-two!");
Instantiate(Instaobj[2], ForSelect);
check = false;
}
m_HitTransform.position = UnityARMatrixOps.GetPosition (hitResult.worldTransform);
m_HitTransform.rotation = UnityARMatrixOps.GetRotation (hitResult.worldTransform);
Debug.Log (string.Format ("x:{0:0.######} y:{1:0.######} z:{2:0.######}", m_HitTransform.position.x, m_HitTransform.position.y, m_HitTransform.position.z));
obj.StopPlaneTracking();
}
}
return false;
}
private void Start()
{
spawngenerator();
newline.SetActive(false);
m_rend1 = GetComponent<MeshRenderer>();
}
void spawngenerator()
{
GameObject newline = Instantiate(Lineprefab);
lins = newline.GetComponent<LineRenderer>();
}
private void LateUpdate()
{
lins.SetPosition(0, startpoint.position);
lins.SetPosition(1, m_HitTransform.position);
if( m_rend1.isVisible==true)
{
Debug.Log("Render is Visible");
newline.SetActive(false);
}
else if( m_rend1.isVisible==false)
{
newline.SetActive(true);
Debug.Log("It is InVisible");
Debug.Log("Render is InVisible");
}
}
void Update () {
#if UNITY_EDITOR //we will only use this script on the editor side, though there is nothing that would prevent it from working on device
if (Input.GetMouseButtonDown (0)) {
Ray ray = Camera.main.ScreenPointToRay (Input.mousePosition);
RaycastHit hit;
//we'll try to hit one of the plane collider gameobjects that were generated by the plugin
//effectively similar to calling HitTest with ARHitTestResultType.ARHitTestResultTypeExistingPlaneUsingExtent
if (Physics.Raycast (ray, out hit, maxRayDistance, collisionLayer)) {
//we're going to get the position from the contact point
m_HitTransform.position = hit.point;
Debug.Log (string.Format ("x:{0:0.######} y:{1:0.######} z:{2:0.######}", m_HitTransform.position.x, m_HitTransform.position.y, m_HitTransform.position.z));
//and the rotation from the transform of the plane collider
m_HitTransform.rotation = hit.transform.rotation;
}
}
#else
if (Input.touchCount > 0 && m_HitTransform != null )
{
var touch = Input.GetTouch(0);
if ((touch.phase == TouchPhase.Began || touch.phase == TouchPhase.Moved) && !EventSystem.current.IsPointerOverGameObject(touch.fingerId))
{
var screenPosition = Camera.main.ScreenToViewportPoint(touch.position);
ARPoint point = new ARPoint {
x = screenPosition.x,
y = screenPosition.y
};
// prioritize result types
ARHitTestResultType[] resultTypes = {
//ARHitTestResultType.ARHitTestResultTypeExistingPlaneUsingGeometry,
ARHitTestResultType.ARHitTestResultTypeExistingPlaneUsingExtent,
// if you want to use infinite planes use this:
//ARHitTestResultType.ARHitTestResultTypeExistingPlane,
//ARHitTestResultType.ARHitTestResultTypeEstimatedHorizontalPlane,
//ARHitTestResultType.ARHitTestResultTypeEstimatedVerticalPlane,
//ARHitTestResultType.ARHitTestResultTypeFeaturePoint
};
foreach (ARHitTestResultType resultType in resultTypes)
{
if (HitTestWithResultType (point, resultType))
{
return;
}
}
}
}
#endif
}
First, I'd start by checking whether the model is currently visible to the camera: https://docs.unity3d.com/ScriptReference/Renderer-isVisible.html
If the object is not visible (isVisible == false), create a line renderer from the object's position to wherever it should end.
The end point could be a child of the camera placed just in front of it, so the line looks like it runs from the user to the object.
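A minimal sketch of that idea, assuming the placed model's Renderer, a two-point LineRenderer, and a start transform (for example a child of the camera) are assigned in the Inspector; the class and field names here are illustrative:
using UnityEngine;

public class PlacementGuideLine : MonoBehaviour
{
    public Renderer modelRenderer;  // renderer of the placed model
    public LineRenderer guideLine;  // a LineRenderer set up with 2 positions
    public Transform lineStart;     // e.g. a child placed just in front of the camera

    void LateUpdate()
    {
        // Renderer.isVisible is true while any camera renders the model,
        // so show the guide line only while the model is off screen.
        bool modelVisible = modelRenderer != null && modelRenderer.isVisible;
        guideLine.enabled = !modelVisible;

        if (!modelVisible)
        {
            guideLine.SetPosition(0, lineStart.position);
            guideLine.SetPosition(1, modelRenderer.transform.position);
        }
    }
}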