- Home /
How to get camera location in object space?
Hey Guys,
In a cg vertex shader, I'm trying to get the camera's location in object space so that I can pass it to the fragment shader and then calculate a ray from the camera to the pixel in object space.
The intent is to have a ray to do some ray tracing in local space of the object.
Unfortunately it is almost working but not quite as you can see below.
I can see the sphere from the positive axis looking negative only (or vice versa, hard to tell), and the way it moves is not correct. As best as I can tell, the ray direction I'm calculating in the pixel shader from the local camera position to the object position is wrong, due to the object space camera position being wrong.
My shader is below. Can anyone see what I'm doing wrong? Thanks!
Shader "TestShaders/TestCGShader"
{
Properties
{
_Color("Main Color", Color) = (1,1,1,1)
}
SubShader {
Pass {
CGPROGRAM
// Built in variables: https://docs.unity3d.com/Manual/SL-UnityShaderVariables.html
// TODO: put this in an include or something so it's system wide?
#pragma enable_d3d11_debug_symbols
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
fixed4 _Color;
struct SVertexOutput {
float4 pos : SV_POSITION;
// TODO: can these be float3? check out W and stuff when scaling and rotating. maybe need to divide by w.
float4 localPos : COLOR0;
float4 cameraLocalPos : COLOR1; // TODO: this is the same for all vertices, can we handle it differently? maybe we pass rayDir down instead?
};
struct SPixelOutput {
float4 color : SV_Target;
float depth : SV_Depth;
};
// TODO: rename function and params
float2 sphIntersect (in float3 ro, in float3 rd, in float4 sph)
{
float3 oc = ro - sph.xyz;
float b = dot(oc, rd);
float c = dot(oc, oc) - sph.w*sph.w;
float h = b*b - c;
if (h<0.0)
return float2(-1.0, -1.0);
h = sqrt(h);
return -b + float2(-h, h);
}
SVertexOutput vert (appdata_base v)
{
SVertexOutput o;
o.pos = UnityObjectToClipPos(v.vertex);
o.localPos = v.vertex;
o.cameraLocalPos = mul(unity_WorldToObject, _WorldSpaceCameraPos);
return o;
}
SPixelOutput frag (SVertexOutput i)
{
SPixelOutput o;
float3 rayPos = i.localPos.xyz;
float3 rayDir = normalize(i.localPos.xyz - i.cameraLocalPos.xyz);
float2 intersect = sphIntersect (rayPos, rayDir, float4(0.0, 0.0, 0.0, 0.25));
if (intersect.y < 0.0)
{
// TODO: just discard, after it's working!
//discard;
o.depth = 0.5;
o.color = fixed4(1.0, 0.0, 0.0, 1.0);
return o;
}
float time = intersect.x < 0.0 ? intersect.y : intersect.x;
// TODO: depth output is from 0 to 1 i believe, so need to handle that somehow, i dunno how.
o.depth = time; // TODO: need to add depth to pixel!
//o.color = fixed4(rayInfo.yzw * 0.5 + 0.5, 1.0);
o.color = fixed4(1.0, 1.0, 0.0, 1.0);
//return fixed4(1.0, 1.0, 0.0, 1.0);
//return fixed4(rayPos*0.5+0.5, 1);
return o;
}
ENDCG
}
}
}
Answer by KingCubby · May 02, 2017 at 05:56 AM
You need to make _WorldCameraPos into a point by converting it to a float4 with 1.0 in the w-component, otherwise the transform won't include translation. I had this exact problem.
SVertexOutput vert (appdata_base v)
{
SVertexOutput o;
o.pos = UnityObjectToClipPos(v.vertex);
o.localPos = v.vertex;
o.cameraLocalPos = mul(unity_WorldToObject, float4(_WorldSpaceCameraPos, 1.0));
return o;
}
Your answer
Follow this Question
Related Questions
Do TEXCOORDS need to be in sequence? 1 Answer
Shaders problems after moving to URP 0 Answers
Ray2d origin remains on 0,0,0 and I don't know why ;( 2 Answers
Limit distance of Depth Mask Shader 0 Answers