I have a simple fog shader that works just fine with forward rendering. However, I need to make it work with deferred rendering too. I was told that I only have to change the script that calls the shader.
My fog-calling C# script:
using UnityEngine;
using UnityEngine.Rendering;

[ExecuteInEditMode]
[RequireComponent(typeof(Camera))]
public class Fog : MonoBehaviour
{
    public Color fogColor;
    public float minDistance;
    public float maxDistance;

    Shader _shader;
    Material _material;

    static class Uniforms
    {
        internal static readonly int _FogColor = Shader.PropertyToID("_FogColor");
        internal static readonly int _MinMax = Shader.PropertyToID("_MinMax");
    }

    public enum BlendMode { Blend, Additive, Multiplicative }
    public BlendMode blendMode;

    void OnEnable()
    {
        if (_shader == null)
        {
            _shader = Shader.Find("Hidden/Fog");
        }
        _material = new Material(_shader);
        _material.hideFlags = HideFlags.DontSave;
    }

    void OnDisable()
    {
        DestroyImmediate(_material);
    }

    void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        _material.SetColor(Uniforms._FogColor, fogColor);
        _material.SetVector(Uniforms._MinMax, new Vector4(minDistance, maxDistance, 0, 0));
        Graphics.Blit(source, destination, _material, (int)blendMode);
    }
}
Could you help me find out what exactly I have to write to make this work with deferred rendering? I have read a lot about this and studied all of the Unity docs about shaders, but there are no examples of how to do this with image effect shaders.
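For context, not a definitive answer: a depth-based image effect in deferred usually needs two things, the camera depth texture (deferred provides one anyway, but requesting it keeps forward and deferred consistent) and the [ImageEffectOpaque] attribute so the blit runs right after the opaque pass. A minimal sketch of those two changes to the script above, assuming Hidden/Fog samples _CameraDepthTexture:

void OnEnable()
{
    if (_shader == null)
    {
        _shader = Shader.Find("Hidden/Fog");
    }
    _material = new Material(_shader);
    _material.hideFlags = HideFlags.DontSave;
    // Request the depth texture explicitly so both render paths behave alike.
    GetComponent<Camera>().depthTextureMode |= DepthTextureMode.Depth;
}

[ImageEffectOpaque] // run after opaque geometry has been rendered
void OnRenderImage(RenderTexture source, RenderTexture destination)
{
    _material.SetColor(Uniforms._FogColor, fogColor);
    _material.SetVector(Uniforms._MinMax, new Vector4(minDistance, maxDistance, 0, 0));
    Graphics.Blit(source, destination, _material, (int)blendMode);
}

Whether this is enough depends on what the shader itself reads, so treat it as a starting point.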
I'm trying to draw to a texture with the mouse. The mouse coordinates are passed to a shader, which outputs to a render texture (I've tried both a regular RenderTexture and a CustomRenderTexture that is affected directly by a material), and it doesn't seem to work.
I can tell from the material that the mouse input is received, but nothing is visible on the render texture.
I'm starting to suspect that render textures aren't fully working in HDRP?
Hoping someone can point me in the direction of what the real issue could be.
I'm on Unity 2019.3.0f3, and the shader is an HDRP Unlit Graph.
This is my script:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;

public class DrawWithMouse : MonoBehaviour
{
    public Camera _camera;
    public CustomRenderTexture _splatmap;
    public Material _drawMaterial;

    private RaycastHit _hit;

    [Range(1, 100)]
    public float _brushSize = 1f;
    [Range(0, 10)]
    public float _brushStrength = 1f;

    private readonly float m_GUIsize = 256;
    private readonly int m_RenderTexSize = 1024;

    void Start()
    {
        _splatmap = new CustomRenderTexture(m_RenderTexSize, m_RenderTexSize, RenderTextureFormat.ARGBFloat, RenderTextureReadWrite.Linear)
        {
            name = "splatmap_CRT_generated",
            initializationColor = Color.black,
            initializationSource = CustomRenderTextureInitializationSource.Material,
            initializationMaterial = _drawMaterial,
            material = _drawMaterial,
            doubleBuffered = true,
            updateMode = CustomRenderTextureUpdateMode.OnDemand
        };
        _drawMaterial.SetVector("_DrawColor", Color.red);
        _drawMaterial.SetTexture("_SplatMap", _splatmap);
    }

    void Update()
    {
        if (Input.GetKey(KeyCode.Mouse0))
        {
            if (Physics.Raycast(_camera.ScreenPointToRay(Input.mousePosition), out _hit, 100f))
            {
                _drawMaterial.SetVector("_InputPoint", new Vector4(_hit.textureCoord.x, _hit.textureCoord.y, 0, 0));
                _drawMaterial.SetFloat("_BrushStrength", _brushStrength);
                _drawMaterial.SetFloat("_BrushSize", _brushSize);
            }
        }
    }

    private void OnGUI()
    {
        GUI.DrawTexture(new Rect(0, 0, m_GUIsize, m_GUIsize), _splatmap, ScaleMode.StretchToFill, false, 1);
    }
}
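One API detail that may matter here (an observation, not a confirmed fix): with updateMode = CustomRenderTextureUpdateMode.OnDemand, a CustomRenderTexture only re-renders when an update is explicitly requested on it, for example:

// After feeding new input to the material, request one update of the on-demand texture.
_splatmap.Update();

Without such a call, changing the material parameters alone will not repaint the texture.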
CustomRenderTextures and URP do not seem to play nicely together either, at least not in builds, as of 2020.1.2f1. I eventually had to give up and hack together a solution with a dedicated camera, layer and quad, because nothing I tried (and I tried a lot) would persuade the CustomRenderTexture to even initialise in a PC build, despite working perfectly in the editor.
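For reference, a minimal sketch of the shape of that workaround (all names here are hypothetical, and the layer must exist in the project's layer settings):

using UnityEngine;

// Renders a quad carrying the draw material into an ordinary RenderTexture
// with a dedicated camera, instead of relying on a CustomRenderTexture.
public class QuadBakeSetup : MonoBehaviour
{
    public Material drawMaterial;   // the material that paints the splat
    public RenderTexture output;    // plain RenderTexture, not a CustomRenderTexture

    void Start()
    {
        int bakeLayer = LayerMask.NameToLayer("SplatBake"); // hypothetical layer name

        // Quad that carries the draw material.
        GameObject quad = GameObject.CreatePrimitive(PrimitiveType.Quad);
        quad.layer = bakeLayer;
        quad.GetComponent<MeshRenderer>().sharedMaterial = drawMaterial;

        // Dedicated camera that sees only that layer and writes into the texture.
        Camera bakeCam = new GameObject("SplatBakeCamera").AddComponent<Camera>();
        bakeCam.cullingMask = 1 << bakeLayer;
        bakeCam.orthographic = true;
        bakeCam.orthographicSize = 0.5f; // the quad primitive is 1x1 units
        bakeCam.transform.position = quad.transform.position - Vector3.forward; // face the quad's front
        bakeCam.clearFlags = CameraClearFlags.SolidColor;
        bakeCam.backgroundColor = Color.black;
        bakeCam.targetTexture = output;
    }
}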
I am trying to get started with pure ECS in Unity 2018.3.6f1. I am starting simple by just having a sphere prefab move in one direction, but it seems no prefabs are created at all.
I have an empty game object prefab with a RenderMesh that has a Sphere mesh and a simple material, and I have this script attached to the prefab as well:
using System;
using Unity.Entities;
using UnityEngine;

[Serializable]
public struct Position : IComponentData
{
    public Vector3 Value;
}

public class BoidPositionComponent : ComponentDataProxy<Position> { }
Then I have this SteeringSystem:
using System.Collections;
using System.Collections.Generic;
using Unity.Entities;
using Unity.Jobs;
using Unity.Burst;
using Unity.Mathematics;
using Unity.Transforms;
using UnityEngine;

public class SteeringSystem : JobComponentSystem
{
    [BurstCompile]
    struct SteeringJob : IJobProcessComponentData<Position>
    {
        public float deltaTime;

        public void Execute(ref Position position)
        {
            Vector3 value = position.Value;
            value = new Vector3(value.x + deltaTime + 1.0f, value.y, value.z);
            position.Value = value;
        }
    }

    protected override JobHandle OnUpdate(JobHandle inputDeps)
    {
        SteeringJob steeringJob = new SteeringJob
        {
            deltaTime = Time.deltaTime
        };
        JobHandle jobHandle = steeringJob.Schedule(this, inputDeps);
        return jobHandle;
    }
}
And lastly I have an empty game object in my scene with this script on it:
using Unity.Entities;
using Unity.Rendering;
using Unity.Collections;
using UnityEngine;

public class ECSWorld : MonoBehaviour
{
    public GameObject boidPrefab;

    private static EntityManager entityManager;
    private static RenderMesh renderMesh;
    private static EntityArchetype entityArchetype;

    // Start is called before the first frame update
    void Start()
    {
        entityManager = World.Active.GetOrCreateManager<EntityManager>();
        entityArchetype = entityManager.CreateArchetype(typeof(Position));
        AddBoids();
    }

    void AddBoids()
    {
        int amount = 200;
        NativeArray<Entity> entities = new NativeArray<Entity>(amount, Allocator.Temp);
        entityManager.Instantiate(boidPrefab, entities);
        for (int i = 0; i < amount; i++)
        {
            // Do stuff, like setting data...
            entityManager.SetComponentData(entities[i], new Position { Value = Vector3.zero });
        }
        entities.Dispose();
    }
}
But I am not seeing anything when I run the game. Should it not instantiate 200 of my prefab and have them move on the screen? What am I missing here?
Thank you,
Søren
You will need a renderer that actually renders your boids. You have created a custom Position component, but there is no system that does any rendering based on it. So all you are doing is creating entities and modifying your Position component in memory (you should be able to see this in the entity debugger), but since you have no renderer, you will not see anything on screen.
For now I would suggest using the "Hybrid Renderer" package that is available in the package manager. It uses its own set of components:
Translation for the position in 3D space
Scale for the scale in world space
Rotation for the rotation in world space
RenderMesh for the mesh to be rendered (you are already using this)
With the current ECS version you can actually just convert a classic game object into an entity by adding a "Convert To Entity" MonoBehaviour to it. This makes editor integration a lot easier, as you don't need all those proxy components. The auto-conversion process will automatically add the Translation, Scale, Rotation and RenderMesh components to your ECS entity.
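For example, here is a minimal sketch of the same steering job rewritten against the built-in Translation component, keeping the question's JobComponentSystem / IJobProcessComponentData style (in newer Entities versions that interface is renamed IJobForEach):

using Unity.Burst;
using Unity.Entities;
using Unity.Jobs;
using Unity.Mathematics;
using Unity.Transforms;
using UnityEngine;

public class TranslationSteeringSystem : JobComponentSystem
{
    [BurstCompile]
    struct SteeringJob : IJobProcessComponentData<Translation>
    {
        public float deltaTime;

        public void Execute(ref Translation translation)
        {
            // Move along +X; the Hybrid Renderer reads Translation, so this shows on screen.
            translation.Value = new float3(translation.Value.x + deltaTime, translation.Value.y, translation.Value.z);
        }
    }

    protected override JobHandle OnUpdate(JobHandle inputDeps)
    {
        return new SteeringJob { deltaTime = Time.deltaTime }.Schedule(this, inputDeps);
    }
}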
We have developed various controllers for moving game objects. I have used a magnetometer/gyro sensor to move a game object with MUVSlide, through the following code:
using UnityEngine;
using ForestIndieGames.Muvslide;

public class Connection : MonoBehaviour
{
    private static bool created = false;

    private bool newInputAvailable;
    private MuvslideConnection muvslideConn;

    private void Awake()
    {
        if (!created)
        {
            DontDestroyOnLoad(this.gameObject);
            created = true;
            muvslideConn = new MuvslideConnection();
        }
    }

    private void OnApplicationQuit()
    {
        if (muvslideConn != null)
            muvslideConn.Close();
    }

    private void Update()
    {
        if (muvslideConn.GetInputManager().IsNewMotionAvailable())
            newInputAvailable = true;
    }

    public bool IsNewInputAvailable()
    {
        bool result = newInputAvailable;
        newInputAvailable = false;
        return result;
    }

    public Vector3 GetAngles()
    {
        float[] angles = muvslideConn.GetInputManager().GetOrientationDegrees();
        return new Vector3(angles[0], angles[1], angles[2]);
    }
}
What I am trying to achieve is to move a game object with a real light spot on a wall. The spot is on the wall and fed through a camera. When the spot moves, I want the game object to follow it exactly. The light spot can be a specific color, or IR or UV, etc.
Any leads, please?
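Not a full answer, but one possible lead as a minimal sketch (nothing here is MUVSlide-specific, and all names are hypothetical): grab the camera feed with a WebCamTexture, find the brightest pixel each frame, and map its image coordinates onto a world-space rectangle the game object moves in. A real solution would filter by the spot's color or IR signature and smooth the result.

using UnityEngine;

public class LightSpotTracker : MonoBehaviour
{
    public Transform target;        // the game object that should follow the spot
    public float worldWidth = 10f;  // world-space area the camera image maps onto
    public float worldHeight = 10f;

    private WebCamTexture _cam;

    void Start()
    {
        _cam = new WebCamTexture();
        _cam.Play();
    }

    void Update()
    {
        if (!_cam.didUpdateThisFrame)
            return;

        // Find the brightest pixel; replace this metric with a color filter
        // if the spot has a known color.
        Color32[] pixels = _cam.GetPixels32();
        int bestIndex = 0;
        int bestBrightness = -1;
        for (int i = 0; i < pixels.Length; i++)
        {
            int brightness = pixels[i].r + pixels[i].g + pixels[i].b;
            if (brightness > bestBrightness)
            {
                bestBrightness = brightness;
                bestIndex = i;
            }
        }

        // Map pixel coordinates to the world-space rectangle.
        float u = (bestIndex % _cam.width) / (float)_cam.width;
        float v = (bestIndex / _cam.width) / (float)_cam.height;
        target.position = new Vector3((u - 0.5f) * worldWidth, (v - 0.5f) * worldHeight, 0f);
    }
}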
I am experiencing the strangest issue.
I have a raycast, and when it touches a certain layer it calls my function, which plays a small animation.
The problem is that this only works on a single object. I have tried duplicating the object, copying the prefab, and dragging the prefab into the scene; it doesn't work.
Now I have the code below, and as you can see it has the line public PlatformFall platfall; which gives me access to the script so I can call platfall.startFall().
Something I've noticed: if I drag a single item from the hierarchy onto the public PlatFall field in the Inspector, then that SINGLE object works as it should (in that it animates when startFall is called). HOWEVER, if I drag the prefab from my project onto the Inspector, the objects do not work (even though the debug log shows that the method is called, the animation does not occur).
public class CharacterController2D : MonoBehaviour
{
    // Jump raycast
    public PlatformFall platfall;

    // LayerMask to determine what is considered ground for the player
    public LayerMask whatIsGround;
    public LayerMask WhatIsFallingPlatform;

    // Transform just below feet for checking if player is grounded
    public Transform groundCheck;

    /*....
    ...*/

    void Update()
    {
        // Ray casting to the falling platform
        isFallingPlatform = Physics2D.Linecast(_transform.position, groundCheck.position, WhatIsFallingPlatform);
        if (isFallingPlatform)
        {
            Debug.Log("Here");
            platfall.startFall();
        }
        Debug.Log(isFallingPlatform);
    }
}
Platform Script
using System.Collections;
using System.Collections.Generic;
using UnityEngine;

public class PlatformFall : MonoBehaviour
{
    public float fallDelay = 0.5f;

    Animator anim;
    Rigidbody2D rb2d;

    void Awake()
    {
        Debug.Log("Awake Called");
        anim = GetComponent<Animator>();
        rb2d = GetComponent<Rigidbody2D>();
    }

    private void Start()
    {
        Debug.Log("Start Called");
    }

    //void OnCollisionEnter2D(Collision2D other)
    //{
    //    Debug.Log(other.gameObject.tag);
    //    GameObject childObject = other.collider.gameObject;
    //    Debug.Log(childObject);
    //    if (other.gameObject.CompareTag("Feet"))
    //    {
    //        anim.SetTrigger("PlatformShake");
    //        Invoke("Fall", fallDelay);
    //        // destroy the log
    //        DestroyObject(this.gameObject, 4);
    //    }
    //}

    public void startFall()
    {
        anim.SetTrigger("PlatformShake");
        Invoke("Fall", fallDelay);
        Debug.Log("Fall Invoked");
        // destroy the log
        // DestroyObject(this.gameObject, 4);
    }

    void Fall()
    {
        rb2d.isKinematic = false;
        rb2d.mass = 15;
    }
}
I understood from your post that you are always calling the PlatformFall instance assigned in the inspector. I think these changes will solve your problem:
public class CharacterController2D : MonoBehaviour
{
    public LayerMask WhatIsFallingPlatform;
    public Transform groundCheck;

    private PlatformFall platfall;
    private RaycastHit2D isFallingPlatform;

    void FixedUpdate()
    {
        isFallingPlatform = Physics2D.Linecast(transform.position, groundCheck.position, WhatIsFallingPlatform);
        if (isFallingPlatform)
        {
            Debug.Log("Here");
            // Get the PlatformFall of the object the linecast actually hit,
            // instead of the single instance assigned in the inspector.
            platfall = isFallingPlatform.transform.GetComponent<PlatformFall>();
            platfall.startFall();
        }
    }
}
By the way, I assume that you have put the prefab in a proper position for the cast to hit it. And one more thing: you should do physics operations that affect your rigidbody in FixedUpdate.
I'm making a 2D game about launching objects towards each other. It is almost complete. However, when I run it on different devices, the offsets of certain game objects are messed up due to the different screen sizes. In the Unity editor I'm using the free-aspect view, and I've created my game objects so that with a camera size of 80 they all align perfectly. I think the problem is that the screen resolution changes the display, and because I'm using a fixed number of Unity units to position my game objects, they are displayed incorrectly when I run the game in a standalone build.
I've written a pixel-perfect camera script, but it doesn't seem to help: the camera is pixel perfect, but in order to compensate, the camera size is turned into something extremely small or extremely large. I just want the same look across all devices and screen resolutions. A main problem is that I want my GUI elements to display next to where the player is standing. My script is here:
using UnityEngine;
using UnityEngine.UI;
using System.Collections;

public class HoldTimeMultiplierPlayerFollowController : MonoBehaviour
{
    public float textYOffset = 50f;
    public Text holdTimeMultiplierDisplay;

    void Update()
    {
        Vector3 position = Camera.main.WorldToScreenPoint(transform.position);
        holdTimeMultiplierDisplay.gameObject.transform.position = new Vector3(position.x, position.y + textYOffset, position.z);
    }
}
Anyone got any ideas?
BTW, all my art is 32x32, and the pixels-to-units ratio is always 1:1. The camera size in the editor is 81.92 when I'm using a free-aspect screen size of 686x269; at those measurements, everything is displayed perfectly.
Any help is appreciated. Maybe a script, or a suggestion on how to implement it?
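One idea for the GUI part, as a sketch rather than a definitive fix (class and field names here are hypothetical): keep using WorldToScreenPoint, but express the label offset in world units instead of screen pixels, so the gap between player and label scales with resolution the same way the sprites do.

using UnityEngine;
using UnityEngine.UI;

public class WorldOffsetLabelFollow : MonoBehaviour
{
    public Text label;
    public float worldYOffset = 1f; // offset above the player, in Unity units

    void Update()
    {
        // Offset in world space first, then convert, so the offset is
        // resolution-independent.
        Vector3 worldPos = transform.position + Vector3.up * worldYOffset;
        label.transform.position = Camera.main.WorldToScreenPoint(worldPos);
    }
}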
Other scripts (if you see any issues that need to be resolved or improvements that could be added, please tell me):
using UnityEngine;
using UnityEngine.UI;
using System.Collections;

public class PlayerMovementController : MonoBehaviour
{
    private int holdTimeMultiplier = 0;
    public int maxHoldTimeMultiplier = 215;
    public Text holdTimeMultiplierDisplayText;

    private Rigidbody2D rbody;

    void Awake()
    {
        rbody = GetComponent<Rigidbody2D>();
        if (rbody == null)
        {
            Debug.LogError("No Rigidbody2D detected on player.");
        }
    }

    void Update()
    {
        Vector2 mousePosition = new Vector2(Camera.main.ScreenToWorldPoint(Input.mousePosition).x,
                                            Camera.main.ScreenToWorldPoint(Input.mousePosition).y);
        Vector2 mouseDirectionFromPlayer = mousePosition - new Vector2(transform.position.x, transform.position.y);
        if (Input.GetMouseButton(0))
        {
            if (holdTimeMultiplier < maxHoldTimeMultiplier)
            {
                holdTimeMultiplier++;
            }
            else
            {
                holdTimeMultiplier = 0;
            }
        }
        else
        {
            if (holdTimeMultiplier != 0)
            {
                rbody.AddForce(mouseDirectionFromPlayer * holdTimeMultiplier * 200);
                holdTimeMultiplier = 0;
            }
        }
        holdTimeMultiplierDisplayText.text = holdTimeMultiplier.ToString();
    }
}
...
using UnityEngine;
using System.Collections;

public class SpawnNewObject : MonoBehaviour
{
    public GameObject[] UseableObjects;
    public Transform SpawnPoint;

    void Update()
    {
        if (Input.GetKeyDown(KeyCode.N))
        {
            var randomObject = Random.Range(0, UseableObjects.Length);
            GameObject UseableObject;
            UseableObject = Instantiate(UseableObjects[randomObject], SpawnPoint.position, SpawnPoint.rotation) as GameObject;
        }
    }
}