- Home /
Getting triangle center in fragment shader
Hello,
I am creating a custom shader to display a point cloud. The shader creates a triangle for each vertex centered on the vertex itself.
I would like to color the triangle as a circle. More in detail, I am trying to set the alpha of each pixel decreasing as it gets further away from the center of the triangle.
However my code right now always displays alpha = 1
How might I solve this problem?
Shader "Custom/SpherePoint" {
Properties {
//_Color ("Color", Color) = (1,1,1,1)
_Radius ("Sphere Radius", float) = 0.01
}
SubShader {
Tags{ "Queue" = "Transparent" "IgnoreProjector" = "True" "RenderType" = "Transparent" }
ZWrite Off
Blend SrcAlpha OneMinusSrcAlpha
LOD 200
Pass {
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma geometry geom
#pragma target 4.0 // Use shader model 3.0 target, to get nicer looking lighting
#include "UnityCG.cginc"
struct vertexIn {
float4 pos : POSITION;
float4 color : COLOR;
};
struct vertexOut {
float4 pos : SV_POSITION;
float4 color : COLOR;
float3 normal : NORMAL;
};
struct geomOut {
float4 pos : SV_POSITION;
float4 color : COLOR;
float3 normal : NORMAL;
float3 triangleCenter : TANGENT;
float norm : PSIZE;
};
//Vertex shader: computes normal wrt camera
vertexOut vert (vertexIn i) {
vertexOut o;
//o.pos = i.pos;
//o.pos = UnityObjectToClipPos(i.pos);
o.pos = mul(unity_ObjectToWorld, i.pos);
o.color = i.color;
//o.normal = normalize(_WorldSpaceCameraPos - mul(unity_ObjectToWorld, o.pos).xyz); //normal is towards the camera
o.normal = ObjSpaceViewDir(o.pos);
//o.normal = i.normal;
return o;
}
float _Radius;
//Geometry shaders: Creates an equilateral triangle with the original vertex in the orthocenter
[maxvertexcount(3)]
void geom(point vertexOut i[1], inout TriangleStream<geomOut> OutputStream)
{
float3 perpendicular = normalize(float3(i[0].normal.y - i[0].normal.x, i[0].normal.x, i[0].normal.x)) * _Radius;
geomOut o, p;
o.color = i[0].color;
//o.normal = i[0].normal;
//o.pos = i[0].pos;
//o.pos = float4(i[0].pos.x * _Radius, i[0].pos.yz, 1);
o.pos = float4(perpendicular + i[0].pos.xyz, 1);
//o.pos = float4(normalize(perpendicular + i[0].pos).xyz * _Radius , 1); //Generic perpendicular to the normal
o.pos = UnityObjectToClipPos(o.pos);
o.normal = ObjSpaceViewDir(o.pos);
//o.triangleCenter = UnityObjectToClipPos(i[0].pos).xyz;
o.triangleCenter = o.pos;
o.norm = distance(o.pos, o.triangleCenter);
OutputStream.Append(o);
p.color = i[0].color;
//p.normal = i[0].normal;
//p.pos = float4(perpendicular.xyz * _Radius, 1);
//p.pos = float4(i[0].pos.x, i[0].pos.y * _Radius, i[0].pos.z, 1);
//p.pos = float4( (normalize(cross(i[0].pos.xyz, perp.xyz)) - perp.xyz / 2 + i[0].pos) * _Radius,1);
p.pos = float4((-(normalize(cross(i[0].pos.xyz, perpendicular)) * _Radius) + perpendicular / 2) + i[0].pos.xyz, 1);
p.pos = UnityObjectToClipPos(p.pos);
p.normal = ObjSpaceViewDir(p.pos);
p.triangleCenter = o.triangleCenter;
p.norm = o.norm;
OutputStream.Append(p);
p.color = i[0].color;
//p.pos = float4(perpendicular.xyz * _Radius * -1, 1);
//o.pos = float4(i[0].pos.x * _Radius, i[0].pos.yz, 1);
//p.pos = float4(-1 * (normalize(cross(i[0].pos.xyz, perp.xyz)) * _Radius) /* - perp.xyz / 2 + i[0].pos*/, 1);
p.pos = float4((-(normalize(cross(i[0].pos.xyz, perpendicular)) * _Radius) - perpendicular / 2) + i[0].pos.xyz, 1);
p.pos = UnityObjectToClipPos(p.pos);
ObjSpaceViewDir(o.pos);
p.normal = ObjSpaceViewDir(p.pos);
p.triangleCenter = o.triangleCenter;
p.norm = o.norm;
OutputStream.Append(p);
OutputStream.RestartStrip();
}
float4 frag(geomOut i) : COLOR
{
float4 col = i.color;
col.a = distance(i.pos.xyz, i.triangleCenter.xyz)/ i.norm;
return col;
}
ENDCG
}
}
FallBack "Diffuse"
}
Answer by squidgemelent · Dec 02, 2017 at 01:20 PM
Is there a reason you are using one triangle and not two triangles resulting in a square to draw a circle? While it may seem more efficient to draw just one triangle instead of two, you need to consider that you may be over-complicating. Firstly, the GPU is insanely fast and depending on your task is likely not an impactful optimisation. Secondly, you're performing a distance operation on every single fragment anyway. Thirdly, drawing two triangles instead of one is not necessarily taking double the time depending on how you're rendering the result on screen. And lastly, a triangle could potentially result in more overdraw depending on how you're doing it.
If you drew a square instead, it would be very easy to assign a UV coordinate to each geometry vertex. The GPU will calculate the UV coordinate of each individual fragment for you, and you can choose the alpha simply based off a much simpler calculation than distance(x, y)
Your answer
