I'm trying to implement shadows in the beam material from the UE4 DMX Plugin, so I can use it with a flashlight and stop worrying about light trails caused by volumetric fog. The problem is that I really don't know how; actually, I don't know anything about HLSL.
So if any tech artist wizard can help me, explain it like I'm five, or point me to some material to study, it would be awesome.
Lightbeam code:
float traversalDepth = FDepth - NDepth;
uint numSteps = floor(traversalDepth / StepSize);
float3 posOffset = normalize(FSlice - NSlice) * StepSize;
float Adj = AdjOpp.x;
float Opp = AdjOpp.y + ConeRadius;
float3 cumul = 0;
for (uint i = 0; i < numSteps; i++) {
    // Position & depth at ray hit
    float3 pos = NSlice + posOffset * i;
    float depth = NDepth + StepSize * i;
    float dist = length(pos);
    float falloff = 1.0f - (length(pos) / MaxDistance);
    // Domain transform
    pos.z = -pos.z;
    pos /= float3(Opp * 2, Opp * 2, Adj);
    float div = ConeRadius / Opp;
    div = (pos.z * (1 - div)) + div;
    pos.xy /= div;
    // Falloff (old)
    //float falloff = 1.0 - saturate(length(pos));
    // Center domain
    pos.z -= 0.5;
    // Clip domain edges
    float maskX = (1 - abs(pos.x)) > 0.5;
    float maskY = (1 - abs(pos.y)) > 0.5;
    float maskZ = (1 - abs(pos.z)) > 0.5;
    if ((maskX * maskY * maskZ) - 0.5 < 0) continue;
    // Soft clipping with scene depth
    float dClip = saturate((ScDepth - depth) / SoftClipSize);
    // UVs from pos
    pos.xy = saturate(pos.xy + 0.5);
    float2 GoboUV = pos.xy;
    float2 ColorUV = pos.xy;
    // Gobo scale offset
    GoboUV.x = GoboUV.x / NumGobos;
    GoboUV.x = GoboUV.x + (GoboIndex / NumGobos);
    // Gobo scrolling
    GoboUV.x = GoboUV.x + (Time * GoboScrollingSpeed);
    // Sample gobo
    float GoboSample = TXTpGobo.SampleLevel(TXTpGoboSampler, GoboUV.xy, 0);
    // Color wheel scale offset
    ColorUV.x = ColorUV.x / NumColors;
    ColorUV.x = ColorUV.x + (ColorIndex / NumColors);
    // Color scrolling
    ColorUV.x = ColorUV.x + ((Time - CurrentTime) * ColorScrollingSpeed);
Material:
https://blueprintue.com/blueprint/zf-3xwb_/
Beam Material
I need a little help.
I have this Perlin noise function, but I don't know how to properly create offsets.
I am using this for infinite terrain generation, but with this script the noise values of individual chunks don't fit together properly, and they create holes.
Is there a way to fix this?
public float[,] GenerateNoise(int chunkSize, int octaves, string seed, float noiseScale, float persistence, float lacunarity, Vector2 offset)
{
    if (noiseScale <= 0)
    {
        noiseScale = 0.0001f;
    }
    float halfWidth = chunkSize / 2f;
    float halfHeight = chunkSize / 2f;
    float[,] noiseMap = new float[chunkSize, chunkSize];
    System.Random rand = new System.Random(seed.GetHashCode());
    //Octaves offset
    Vector2[] octavesOffset = new Vector2[octaves];
    for (int i = 0; i < octaves; i++)
    {
        float offset_X = rand.Next(-100000, 100000) + offset.x;
        float offset_Y = rand.Next(-100000, 100000) + offset.y;
        octavesOffset[i] = new Vector2(offset_X / chunkSize, offset_Y / chunkSize);
    }
    for (int x = 0; x < chunkSize; x++)
    {
        for (int y = 0; y < chunkSize; y++)
        {
            float amplitude = 1;
            float frequency = 1;
            float noiseHeight = 0;
            float superpositionCompensation = 0;
            for (int i = 0; i < octaves; i++)
            {
                float sampleX = (x - halfWidth) / noiseScale * frequency + octavesOffset[i].x * frequency;
                float sampleY = (y - halfHeight) / noiseScale * frequency + octavesOffset[i].y * frequency;
                float noiseValue = Mathf.PerlinNoise(sampleX, sampleY);
                noiseHeight += noiseValue * amplitude;
                noiseHeight -= superpositionCompensation;
                amplitude *= persistence;
                frequency *= lacunarity;
                superpositionCompensation = amplitude / 2;
            }
            noiseMap[x, y] = Mathf.Clamp01(noiseHeight);
        }
    }
    return noiseMap;
}
It is quite simple, actually: just add the chunk's x,y coordinates to the Mathf.PerlinNoise input. Taking your code as an example, you can:
Pass the chunk position (chunkPos) as an argument to it:
public float[,] GenerateNoise(Vector2 chunkPos, int chunkSize, int octaves, string seed, float noiseScale, float persistence, float lacunarity, Vector2 offset)
Add it to the Mathf.PerlinNoise invocation:
float noiseValue = Mathf.PerlinNoise(sampleX + chunkPos.x, sampleY + chunkPos.y);
Then make sure to generate each chunk with an appropriate chunkPos, where chunkPos can be its transform.position or whatever coordinates you have.
That's it.
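For example, a minimal (hypothetical) call site for the modified function could look like this; with a single octave, stepping chunkPos by chunkSize / noiseScale per chunk makes the sampling windows of neighbouring chunks line up exactly:

// Two horizontally adjacent chunks, same seed and settings.
// chunkPos is expressed in noise-space units, i.e. the same units
// that sampleX/sampleY use inside GenerateNoise.
Vector2 chunkPosLeft = new Vector2(0, 0);
Vector2 chunkPosRight = new Vector2(chunkSize / noiseScale, 0); // neighbour to the right
float[,] mapLeft = GenerateNoise(chunkPosLeft, chunkSize, octaves, seed, noiseScale, persistence, lacunarity, offset);
float[,] mapRight = GenerateNoise(chunkPosRight, chunkSize, octaves, seed, noiseScale, persistence, lacunarity, offset);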
I'm using the following library:
https://github.com/tengbao/vanta/blob/master/src/vanta.halo.js
A demo can be found here:
https://www.vantajs.com/?effect=halo
If I'm using a bright (or even white) background color, the effect is not visible anymore.
With my limited WebGL knowledge, my guess is that this is because of the subtraction of the background color (mixedColor = texture2D(...) - backgroundColor) (but I could be wrong).
void main() {
    vec2 res2 = iResolution.xy * iDpr;
    vec2 uv = gl_FragCoord.xy / res2; // 0 to 1
    vec4 oldImage = texture2D(iBuffer, uv);
    vec3 mixedColor = oldImage.rgb - backgroundColor;
    float cropDist = 0.01;
    float cropXOffset = 0.2;
    float cropYOffset = 0.2;
    vec2 offset = uv + vec2((mixedColor.g - cropXOffset) * cropDist, (mixedColor.r - cropYOffset) * cropDist);
    float spinDist = 0.001;
    float spinSpeed = 0.2 + 0.15 * cos(iTime * 0.5);
    float timeFrac = mod(iTime, 6.5);
    vec2 offset2 = uvBig + vec2(cos(timeFrac * spinSpeed) * spinDist, sin(timeFrac * spinSpeed) * spinDist);
    mixedColor = texture2D(iBuffer, offset).rgb * 0.4
        + texture2D(iBuffer, offset2).rgb * 0.6
        - backgroundColor;
    float fadeAmt = 0.0015; // fade this amount each frame // 0.002
    mixedColor = (mixedColor - fadeAmt) * .995;
    vec4 spectrum = abs(abs(.95 * atan(uv.x, uv.y) - vec4(0, 2, 4, 0)) - 3.) - 1.;
    float angle = atan(pixel.x, pixel.y);
    float dist = length(pixel - mouse2 * 0.15) * 8. + sin(iTime) * .01;
    float flowerPeaks = .05 * amplitudeFactor * size;
    float flowerPetals = 7.;
    float edge = abs((dist + sin(angle * flowerPetals + iTime * 0.5) * sin(iTime * 1.5) * flowerPeaks) * 0.65 / size);
    float colorChangeSpeed = 0.75 + 0.05 * sin(iTime) * 1.5;
    float rainbowInput = timeFrac * colorChangeSpeed;
    float brightness = 0.7;
    vec4 rainbow = sqrt(j2hue(cos(rainbowInput))) + vec4(baseColor, 0) - 1.0 + brightness;
    float factor = smoothstep(1., .9, edge) * pow(edge, 2.);
    vec3 color = rainbow.rgb * smoothstep(1., .9, edge) * pow(edge, 20.);
    vec4 ring = vec4(
        backgroundColor + clamp(mixedColor + color, 0., 1.),
        1.0);
    gl_FragColor = ring;
}
However, I'm not able to figure out how to adapt the behavior so that I can use a bright background.
If I remove the subtraction (and also the addition of the same value at the end, i.e. vec4 ring = vec4(clamp(...))), I get the correct effect, but with a black background.
Does anyone have an idea how to adapt the shader?
The problem is likely that backgroundColor is being added to the color to calculate the ring value. Because the clamp to [0, 1] happens before the background is added back on top, a bright backgroundColor pushes the sum past 1.0 and blows out your final color.
I want to convert from cube map [figure1] into an equirectangular panorama [figure2].
Figure1
Figure2
It is possible to go from Spherical to Cubic (by following: Convert 2:1 equirectangular panorama to cube map), but I'm lost on how to reverse it.
Figure2 is to be rendered into a sphere using Unity.
Assuming the input image is in the following cubemap format:
The goal is to project the image to the equirectangular format like so:
The conversion algorithm is rather straightforward.
In order to calculate the best estimate of the color at each pixel in the equirectangular image given a cubemap with 6 faces:
Firstly, calculate the polar coordinates that correspond to each pixel in the spherical image.
Secondly, using the polar coordinates, form a vector and determine on which face of the cubemap and which pixel of that face the vector lies; just like a raycast from the center of a cube would hit one of its sides and a specific point on that side.
Keep in mind that there are multiple methods to estimate the color of a pixel in the equirectangular image given a normalized coordinate (u,v) on a specific face of a cubemap. The most basic method, which is a very raw approximation and will be used in this answer for simplicity's sake, is to round the coordinates to a specific pixel and use that pixel. Other more advanced methods could calculate an average of a few neighbouring pixels.
The implementation of the algorithm will vary depending on the context. I did a quick implementation in Unity3D C# that shows how to implement the algorithm in a real world scenario. It runs on the CPU; there is a lot of room for improvement, but it is easy to understand.
using UnityEngine;

public static class CubemapConverter
{
    public static byte[] ConvertToEquirectangular(Texture2D sourceTexture, int outputWidth, int outputHeight)
    {
        Texture2D equiTexture = new Texture2D(outputWidth, outputHeight, TextureFormat.ARGB32, false);
        float u, v; //Normalised texture coordinates, from 0 to 1, starting at lower left corner
        float phi, theta; //Polar coordinates
        int cubeFaceWidth, cubeFaceHeight;
        cubeFaceWidth = sourceTexture.width / 4; //4 horizontal faces
        cubeFaceHeight = sourceTexture.height / 3; //3 vertical faces
        for (int j = 0; j < equiTexture.height; j++)
        {
            //Rows start from the bottom
            v = 1 - ((float)j / equiTexture.height);
            theta = v * Mathf.PI;
            for (int i = 0; i < equiTexture.width; i++)
            {
                //Columns start from the left
                u = ((float)i / equiTexture.width);
                phi = u * 2 * Mathf.PI;
                float x, y, z; //Unit vector
                x = Mathf.Sin(phi) * Mathf.Sin(theta) * -1;
                y = Mathf.Cos(theta);
                z = Mathf.Cos(phi) * Mathf.Sin(theta) * -1;
                float xa, ya, za;
                float a;
                a = Mathf.Max(new float[3] { Mathf.Abs(x), Mathf.Abs(y), Mathf.Abs(z) });
                //Vector parallel to the unit vector that lies on one of the cube faces
                xa = x / a;
                ya = y / a;
                za = z / a;
                Color color;
                int xPixel, yPixel;
                int xOffset, yOffset;
                if (xa == 1)
                {
                    //Right
                    xPixel = (int)((((za + 1f) / 2f) - 1f) * cubeFaceWidth);
                    xOffset = 2 * cubeFaceWidth; //Offset
                    yPixel = (int)((((ya + 1f) / 2f)) * cubeFaceHeight);
                    yOffset = cubeFaceHeight; //Offset
                }
                else if (xa == -1)
                {
                    //Left
                    xPixel = (int)((((za + 1f) / 2f)) * cubeFaceWidth);
                    xOffset = 0;
                    yPixel = (int)((((ya + 1f) / 2f)) * cubeFaceHeight);
                    yOffset = cubeFaceHeight;
                }
                else if (ya == 1)
                {
                    //Up
                    xPixel = (int)((((xa + 1f) / 2f)) * cubeFaceWidth);
                    xOffset = cubeFaceWidth;
                    yPixel = (int)((((za + 1f) / 2f) - 1f) * cubeFaceHeight);
                    yOffset = 2 * cubeFaceHeight;
                }
                else if (ya == -1)
                {
                    //Down
                    xPixel = (int)((((xa + 1f) / 2f)) * cubeFaceWidth);
                    xOffset = cubeFaceWidth;
                    yPixel = (int)((((za + 1f) / 2f)) * cubeFaceHeight);
                    yOffset = 0;
                }
                else if (za == 1)
                {
                    //Front
                    xPixel = (int)((((xa + 1f) / 2f)) * cubeFaceWidth);
                    xOffset = cubeFaceWidth;
                    yPixel = (int)((((ya + 1f) / 2f)) * cubeFaceHeight);
                    yOffset = cubeFaceHeight;
                }
                else if (za == -1)
                {
                    //Back
                    xPixel = (int)((((xa + 1f) / 2f) - 1f) * cubeFaceWidth);
                    xOffset = 3 * cubeFaceWidth;
                    yPixel = (int)((((ya + 1f) / 2f)) * cubeFaceHeight);
                    yOffset = cubeFaceHeight;
                }
                else
                {
                    Debug.LogWarning("Unknown face, something went wrong");
                    xPixel = 0;
                    yPixel = 0;
                    xOffset = 0;
                    yOffset = 0;
                }
                xPixel = Mathf.Abs(xPixel);
                yPixel = Mathf.Abs(yPixel);
                xPixel += xOffset;
                yPixel += yOffset;
                color = sourceTexture.GetPixel(xPixel, yPixel);
                equiTexture.SetPixel(i, j, color);
            }
        }
        equiTexture.Apply();
        var bytes = equiTexture.EncodeToPNG();
        Object.DestroyImmediate(equiTexture);
        return bytes;
    }
}
In order to utilize the GPU I created a shader that does the same conversion. It is much faster than running the conversion pixel by pixel on the CPU, but unfortunately Unity imposes resolution limitations on cubemaps, so its usefulness is limited in scenarios where a high-resolution input image is to be used.
Shader "Conversion/CubemapToEquirectangular" {
Properties {
_MainTex ("Cubemap (RGB)", CUBE) = "" {}
}
Subshader {
Pass {
ZTest Always Cull Off ZWrite Off
Fog { Mode off }
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma fragmentoption ARB_precision_hint_fastest
//#pragma fragmentoption ARB_precision_hint_nicest
#include "UnityCG.cginc"
#define PI 3.141592653589793
#define TWOPI 6.283185307179587
struct v2f {
float4 pos : POSITION;
float2 uv : TEXCOORD0;
};
samplerCUBE _MainTex;
v2f vert( appdata_img v )
{
v2f o;
o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
o.uv = v.texcoord.xy * float2(TWOPI, PI);
return o;
}
fixed4 frag(v2f i) : COLOR
{
float theta = i.uv.y;
float phi = i.uv.x;
float3 unit = float3(0,0,0);
unit.x = sin(phi) * sin(theta) * -1;
unit.y = cos(theta) * -1;
unit.z = cos(phi) * sin(theta) * -1;
return texCUBE(_MainTex, unit);
}
ENDCG
}
}
Fallback Off
}
The quality of the resulting images can be greatly improved by employing a more sophisticated method to estimate the color of a pixel during the conversion, by post-processing the resulting image, or both. For example, a bigger image could be generated so that a blur filter can be applied, and then downsampled to the desired size.
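As a small illustration of the first option (a sketch only, not part of the original project), the nearest-pixel lookup in the C# converter above could be swapped for Unity's built-in bilinear sampling, which blends the four surrounding pixels:

// Instead of rounding to one source pixel:
// color = sourceTexture.GetPixel(xPixel, yPixel);
// sample with normalized coordinates (the texture must be readable):
float uSrc = (float)xPixel / sourceTexture.width;
float vSrc = (float)yPixel / sourceTexture.height;
color = sourceTexture.GetPixelBilinear(uSrc, vSrc);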
I created a simple Unity project with two editor wizards that show how to properly utilize either the C# code or the shader shown above. Get it here:
https://github.com/Mapiarz/CubemapToEquirectangular
Remember to set proper import settings in Unity for your input images:
Point filtering
Truecolor format
Disable mipmaps
Non Power of 2: None (only for 2DTextures)
Enable Read/Write (only for 2DTextures)
cube2sphere automates the entire process. Example:
$ cube2sphere front.jpg back.jpg right.jpg left.jpg top.jpg bottom.jpg -r 2048 1024 -fTGA -ostitched
I am working on creating an offscreen enemy indicator using the tutorial at the link below. I can get the indicator to rotate to point at the enemy, but the indicator does not move from edge to edge of the screen.
http://gamedevelopment.tutsplus.com/tutorials/positioning-on-screen-indicators-to-point-to-off-screen-targets--gamedev-6644
This is the desired outcome:
So far I have managed to figure out the code below. Please help.
var screenCenter:Vector3 = new Vector3(0.5, 0.5, 0f);
//Note coordinates are translated:
//make (0,0) the centre of the screen instead of the bottom left
screenpos -= screenCenter;
//find angle from centre of screen instead of bottom left
var angle:float = Mathf.Atan2(screenpos.y, screenpos.x);
angle -= 90 * Mathf.Deg2Rad;
var cos:float = Mathf.Cos(angle);
var sin:float = -Mathf.Cos(angle);
screenpos = screenCenter + new Vector3(sin*150, cos*150, 0);
//y = mx + b format
var m:float = cos/sin;
var ScreenBounds:Vector3 = screenCenter;// * 0.9f;
//Check up and down first
if(cos > 0){
    screenpos = new Vector3(ScreenBounds.y/m, ScreenBounds.y, 0);
}else{//down
    screenpos = new Vector3(-ScreenBounds.y/m, -ScreenBounds.y, 0);
}
//If out of bounds then get the point on the appropriate side
if(screenpos.x > ScreenBounds.x){//Out of bounds, must be on the right
    screenpos = new Vector3(ScreenBounds.x, ScreenBounds.y*m, 0);
}else if(screenpos.x < ScreenBounds.x){//Out of bounds, must be on the left
    screenpos = new Vector3(-ScreenBounds.x, -ScreenBounds.y*m, 0);
}
//Remove the coordinate translation
screenpos += screenCenter;
var DistanceIndicatorRectT = DistanceIndicator.GetComponent(RectTransform);
DistanceIndicatorRectT.localPosition = new Vector3(screenpos.x * scrWidth/2, screenpos.y * scrHeight/2, DistanceIndicatorRectT.localPosition.z * screenpos.z);
DistanceIndicator.transform.rotation = Quaternion.Euler(0, 0, angle*Mathf.Rad2Deg);
I took a bit of a different approach from yours: what Carlos suggested, but without using physics.
If "t" is your target, this is how you can get its position on screen in pixels (if it's off screen, the values just go negative or higher than the screen width/height):
Vector3 targetPosOnScreen = Camera.main.WorldToScreenPoint (t.position);
And this function returns a bool telling whether a position (in pixels) is on screen:
bool onScreen(Vector2 input){
    return !(input.x > Screen.width || input.x < 0 || input.y > Screen.height || input.y < 0);
}
The first thing we should do is check whether the target is on screen; only if it's not do we proceed with the rest of the code:
if (onScreen (targetPosOnScreen)) {
    //Some code to destroy indicator or make it invisible
    return;
}
Then a simple calculation of the angle between the center of the screen and the target:
Vector3 center = new Vector3 (Screen.width / 2f, Screen.height / 2f, 0);
float angle = Mathf.Atan2(targetPosOnScreen.y-center.y, targetPosOnScreen.x-center.x) * Mathf.Rad2Deg;
The next part of the code determines, based on the angle we just calculated, which edge of the camera's view the target lies beyond.
float coef;
if (Screen.width > Screen.height)
    coef = (float)Screen.width / Screen.height; // cast avoids integer division
else
    coef = (float)Screen.height / Screen.width;
float degreeRange = 360f / (coef + 1);
if (angle < 0) angle = angle + 360;
int edgeLine;
if (angle < degreeRange / 4f) edgeLine = 0;
else if (angle < 180 - degreeRange / 4f) edgeLine = 1;
else if (angle < 180 + degreeRange / 4f) edgeLine = 2;
else if (angle < 360 - degreeRange / 4f) edgeLine = 3;
else edgeLine = 0;
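To make those boundaries concrete, a quick worked example, assuming a 16:9 landscape screen: coef = 16/9 ≈ 1.78, so degreeRange = 360 / 2.78 ≈ 129.5° and degreeRange / 4 ≈ 32.4°. Angles within ±32.4° of 0° therefore map to the right edge (edgeLine 0), angles up to 147.6° to the top (1), up to 212.4° to the left (2), and up to 327.6° to the bottom (3). (The exact angle to the screen corner is atan2(9, 16) ≈ 29.4°, not 32.4°; a later answer replaces this approximation with the true corner angle.)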
http://s23.postimg.org/ytpm82ad7/Untitled_1.png
The image shows which value "edgeLine" takes based on the target's position (red represents the camera's view, the black lines the division of the space).
Then we have this code, which sets the Transform "t2" (the indicator) to the correct position and angle:
t2.position = Camera.main.ScreenToWorldPoint(intersect(edgeLine, center, targetPosOnScreen)+new Vector3(0,0,10));
t2.eulerAngles = new Vector3 (0, 0, angle);
Below is the "intersect" function:
Vector3 intersect(int edgeLine, Vector3 line2point1, Vector3 line2point2){
    float[] A1 = {-Screen.height, 0, Screen.height, 0};
    float[] B1 = {0, -Screen.width, 0, Screen.width};
    float[] C1 = {-Screen.width * Screen.height, -Screen.width * Screen.height, 0, 0};
    float A2 = line2point2.y - line2point1.y;
    float B2 = line2point1.x - line2point2.x;
    float C2 = A2 * line2point1.x + B2 * line2point1.y;
    float det = A1[edgeLine] * B2 - A2 * B1[edgeLine];
    return new Vector3((B2 * C1[edgeLine] - B1[edgeLine] * C2) / det, (A1[edgeLine] * C2 - A2 * C1[edgeLine]) / det, 0);
}
We pass this function the index of the screen edge (one side of the camera's view rectangle) to intersect with, plus the two points that define the second line: the center of the screen and the target position. Each line is stored in the form A*x + B*y = C, and the intersection point is found via the determinant det = A1*B2 - A2*B1.
For a better explanation of this method, look here: https://www.topcoder.com/community/data-science/data-science-tutorials/geometry-concepts-line-intersection-and-its-applications/
I just modified the values of A1, B1 and C1: each of them is now an array of 4, where each entry holds the coefficients for one edge of the camera's view rectangle.
If you want to implement margins, just change the pivot of the indicator (put the actual sprite renderer as a child and move it in local space as you want).
The next step would be making this work for an array of targets and putting all those targets in a given array. Hope this helps, and don't be too hard on me, it's my first time posting here :)
Create a rectangular collider delimiting the borders of the screen and use Physics2D.Raycast in the direction of the enemy.
The point of collision will tell you where the green arrow needs to be drawn.
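A minimal sketch of that idea (all names here are placeholders; it assumes a 2D setup with an EdgeCollider2D traced along the screen borders on its own physics layer):

// Cast a ray from the screen centre toward the enemy and place the
// indicator where the ray crosses the screen-border collider.
Vector2 center = Camera.main.ViewportToWorldPoint(new Vector3(0.5f, 0.5f, 0f));
Vector2 dir = ((Vector2)enemy.position - center).normalized;
RaycastHit2D hit = Physics2D.Raycast(center, dir, Mathf.Infinity, borderLayerMask);
if (hit.collider != null)
{
    indicator.position = hit.point; // the point on the screen edge
    indicator.rotation = Quaternion.Euler(0, 0, Mathf.Atan2(dir.y, dir.x) * Mathf.Rad2Deg);
}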
The example above has an error in the angular ranges used to pick an edge: for a non-square screen, the boundary should be the actual angle to the screen corner rather than degreeRange / 4. The version below computes that corner angle (angle2) instead.
private void SetIndicatorPosition(Indicator obj)
{
    var target = obj.Target;
    var indicator = obj.PointToTarget;
    if (target == null)
    {
        indicator.SetActive(false);
        return;
    }
    Vector3 targetPosOnScreen = cam.WorldToScreenPoint(target.transform.position);
    if (onScreen(targetPosOnScreen))
    {
        indicator.SetActive(false);
        return;
    }
    indicator.SetActive(true);
    Vector3 center = new Vector3(Screen.width / 2f, Screen.height / 2f, 0);
    float angle = Mathf.Atan2(targetPosOnScreen.y - center.y, targetPosOnScreen.x - center.x) * Mathf.Rad2Deg;
    float scale;
    if (Screen.width > Screen.height)
        scale = (float)Screen.width / Screen.height; // cast avoids integer division
    else
        scale = (float)Screen.height / Screen.width;
    float degreeRange = 360f / (scale + 1);
    float angle2 = Mathf.Atan2(Screen.height - center.y, Screen.width - center.x) * Mathf.Rad2Deg;
    if (angle < 0) angle = angle + 360;
    int edgeLine;
    if (angle < angle2) edgeLine = 0;
    else if (angle < 180 - angle2) edgeLine = 1;
    else if (angle < 180 + angle2) edgeLine = 2;
    else if (angle < 360 - angle2) edgeLine = 3;
    else edgeLine = 0;
    indicator.transform.position = Camera.main.ScreenToWorldPoint(Intersect(edgeLine, center, targetPosOnScreen));
    indicator.transform.eulerAngles = new Vector3(0, 0, angle);
}
Vector3 Intersect(int edgeLine, Vector3 line2point1, Vector3 line2point2)
{
    float[] A1 = { -Screen.height, 0, Screen.height, 0 };
    float[] B1 = { 0, -Screen.width, 0, Screen.width };
    float[] C1 = { -Screen.width * Screen.height, -Screen.width * Screen.height, 0, 0 };
    float A2 = line2point2.y - line2point1.y;
    float B2 = line2point1.x - line2point2.x;
    float C2 = A2 * line2point1.x + B2 * line2point1.y;
    float det = A1[edgeLine] * B2 - A2 * B1[edgeLine];
    var x = (B2 * C1[edgeLine] - B1[edgeLine] * C2) / det;
    var y = (A1[edgeLine] * C2 - A2 * C1[edgeLine]) / det;
    return new Vector3(x, y, 0);
}

bool onScreen(Vector2 input)
{
    return !(input.x > Screen.width || input.x < 0 || input.y > Screen.height || input.y < 0);
}

public class Indicator
{
    public GameObject Target { get; private set; }
    public GameObject PointToTarget { get; private set; }

    public Indicator(GameObject target, GameObject pointToTarget, ObjectTypeEnum type)
    {
        Target = target;
        PointToTarget = pointToTarget;
        var texture = pointToTarget.GetComponentInChildren<UITexture>();
        if (texture != null)
        {
            texture.color = Helper.GetHintColor(type);
        }
    }
}
You can call it in Update:
foreach (var obj in listIndicator)
{
    SetIndicatorPosition(obj);
}
I'm trying to get some basic OpenGL-ES with shaders to run on the iPhone, based on some examples.
For some reason my projection matrix refuses to produce anything on the screen. It feels like a clipping plane is set very near, but that contradicts the values I supply. If I render the same scene with an orthographic projection matrix, I see my object, just with no perspective, obviously.
Here's the code that generates the projection matrix:
esPerspective(&proj, 45.f, 768.0/1024.0, 1.f, 10000.f);
void esPerspective(ESMatrix *result, float fovy, float aspect, float nearZ, float farZ)
{
float frustumW, frustumH;
frustumH = tanf( fovy / 360.0f * PI ) * nearZ;
frustumW = frustumH * aspect;
esFrustum( result, -frustumW, frustumW, -frustumH, frustumH, nearZ, farZ );
}
void esFrustum(ESMatrix *result, float left, float right, float bottom, float top, float nearZ, float farZ)
{
float deltaX = right - left;
float deltaY = top - bottom;
float deltaZ = farZ - nearZ;
ESMatrix frust;
if ( (nearZ <= 0.0f) || (farZ <= 0.0f) ||
(deltaX <= 0.0f) || (deltaY <= 0.0f) || (deltaZ <= 0.0f) )
return;
frust.m[0][0] = 2.0f * nearZ / deltaX;
frust.m[0][1] = frust.m[0][2] = frust.m[0][3] = 0.0f;
frust.m[1][1] = 2.0f * nearZ / deltaY;
frust.m[1][0] = frust.m[1][2] = frust.m[1][3] = 0.0f;
frust.m[2][0] = (right + left) / deltaX;
frust.m[2][1] = (top + bottom) / deltaY;
frust.m[2][2] = -(nearZ + farZ) / deltaZ;
frust.m[2][3] = -1.0f;
frust.m[3][2] = -2.0f * nearZ * farZ / deltaZ;
frust.m[3][0] = frust.m[3][1] = frust.m[3][3] = 0.0f;
esMatrixMultiply(result, &frust, result);
}
My projection matrix comes out as:
[3.21, 0, 0, 0]
[0, 2.41, 0, 0]
[0, 0, -1, -1]
[0, 0, -2, 0]
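For what it's worth, those entries are consistent with the inputs: with fovy = 45° and nearZ = 1, frustumH = tan(22.5°) ≈ 0.414 and frustumW ≈ 0.414 * 0.75 ≈ 0.311, so 2*nearZ/deltaX = 1/0.311 ≈ 3.21 and 2*nearZ/deltaY = 1/0.414 ≈ 2.41; with farZ = 10000 the remaining terms come out at ≈ -1 and ≈ -2. So the individual values look correct.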
Even if I manually set the [3][3] cell to 1 I still don't see anything.
Any ideas?
Swap your rows and columns over (i.e. transpose).
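For reference, the transpose of the matrix printed above (swapping the [2][3] and [3][2] entries) is:
[3.21, 0, 0, 0]
[0, 2.41, 0, 0]
[0, 0, -1, -2]
[0, 0, -1, 0]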
Well, your projection matrix is transposed either way: you have a row-major projection matrix, while OpenGL ES expects the data in column-major order (the transpose argument of glUniformMatrix4fv must be GL_FALSE in ES, so the transposition has to happen on your side).