How can I draw an X*X pixel rect to fill an X*X RenderTexture?
Hello. I'm trying to fill an X*X RenderTexture with the output of a shader. My scene has a Camera (with the EffectCamera script), a Plane (with the EffectScreenQuad script) and a RenderTexture. In EffectScreenQuad.Start() I create the plane from scratch by setting the vertices myself; cam refers to the Camera that has the EffectCamera script attached.
using UnityEngine;
using System.Collections;

public class EffectScreenQuad : MonoBehaviour
{
    public Vector2 dimensions;
    public Camera cam;

    // Use this for initialization
    void Start ()
    {
        Vector3[] vertices = new Vector3[4];
        Vector3[] normals = new Vector3[4];
        Vector2[] uv = new Vector2[4];
        int[] triangles = new int[6];

        //Create plane vertices that fill a dimensions.x*dimensions.y pixels rect
        Vector3 lowerLeftPlaneScreenPos = new Vector3(0 , 0 , 0);
        Vector3 lowerRightPlaneScreenPos = new Vector3(dimensions.x , 0 , 0);
        Vector3 upperLeftPlaneScreenPos = new Vector3(0 , dimensions.y , 0);
        Vector3 lowerLeftPlaneWorldPos = cam.ScreenToWorldPoint(lowerLeftPlaneScreenPos);
        Vector3 lowerRightPlaneWorldPos = cam.ScreenToWorldPoint(lowerRightPlaneScreenPos);
        Vector3 upperLeftPlaneWorldPos = cam.ScreenToWorldPoint(upperLeftPlaneScreenPos);

        vertices[0] = lowerLeftPlaneWorldPos;
        vertices[1] = lowerRightPlaneWorldPos;
        vertices[2] = upperLeftPlaneWorldPos;
        vertices[3] = upperLeftPlaneWorldPos + lowerRightPlaneWorldPos;

        for (int i = 0; i < 4; i++)
            normals[i] = Vector3.back;

        uv[0] = Vector2.zero;
        uv[1] = Vector2.right;
        uv[2] = Vector2.up;
        uv[3] = Vector2.right + Vector2.up;

        triangles[0] = 0;
        triangles[1] = 1;
        triangles[2] = 2;
        triangles[3] = 0;
        triangles[4] = 2;
        triangles[5] = 3;

        MeshFilter mesh_filter = gameObject.GetComponent<MeshFilter>();
        mesh_filter.mesh = new Mesh();
        mesh_filter.mesh.vertices = vertices;
        mesh_filter.mesh.normals = normals;
        mesh_filter.mesh.uv = uv;
        mesh_filter.mesh.triangles = triangles;
    }

    // Update is called once per frame
    void Update () {
    }
}
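In case it helps, here is a minimal sketch of how the RenderTexture could be created and assigned in code instead of in the inspector. The class and field names (EffectTargetSetup, rtSize, effectCam) are just illustrative, and ARGBFloat is an assumption about the format needed for the position data:

using UnityEngine;

public class EffectTargetSetup : MonoBehaviour
{
    public int rtSize = 16;      //must match dimensions.x == dimensions.y
    public Camera effectCam;     //the camera that has the EffectCamera script

    void Start ()
    {
        //ARGBFloat keeps full precision for the positions written by the shader
        RenderTexture rt = new RenderTexture(rtSize , rtSize , 0 , RenderTextureFormat.ARGBFloat);
        rt.filterMode = FilterMode.Point;   //one texel per "vertex", no interpolation
        rt.Create();
        effectCam.targetTexture = rt;       //the camera now renders the quad into rt
    }
}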
The next snippet is the shader. The commented-out code is what I ultimately need to return, but for now I'm just returning the UV coordinates, since I couldn't debug the returned values when using floating-point TextureFormats.
Shader "clothInitPos" {
Properties {
_posTexSize("_posTexSize",Float)=16
_posOffset("_posOffset",Float)=5
}
SubShader {
Tags { "RenderType"="Opaque" }
Pass
{
//First pass initializes position texture. To ensure all vertices being generated, a quad must be
//drawn that fills _posTexSize*_posTexSize pixels.
CGPROGRAM
#pragma target 3.0
#pragma vertex vert
#pragma fragment frag
uniform float _posTexSize;
uniform float _posOffset;
struct v2f {
float4 pos:POSITION;
float2 uv:TEXCOORD0;
};
v2f vert(v2f IN)
{
v2f toFrag = IN;
return toFrag;
}
float4 frag(v2f fromVert):COLOR
{
//Generate a regular grid by scaling uv coords by the number of verts and adding the position
//offset
//float2 scaledUV = fromVert.uv*_posTexSize*_posOffset;
//return float4(scaledUV.x , 0.0f , scaledUV.y , 1.0f); //Grid in the y=0 plane. Alpha unused.
return float4(fromVert.uv.x , 0.0f , fromVert.uv.y , 1.0f);
}
ENDCG
}
}
FallBack "Diffuse"
}
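With the return statement above, each pixel just stores its own normalized coordinate, so the value ReadPixels should produce can be predicted on the CPU. A small sketch for comparing against the readback (the half-texel offset is an assumption about how the UVs get interpolated across the quad):

using UnityEngine;

public static class EffectDebug
{
    //Expected color at pixel (x, y) of a size*size target when the fragment shader
    //returns float4(uv.x, 0, uv.y, 1).
    public static Color ExpectedColor(int x , int y , int size)
    {
        float u = (x + 0.5f) / size;
        float v = (y + 0.5f) / size;
        return new Color(u , 0f , v , 1f);  //red grows left to right, blue bottom to top
    }
}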
I was expecting the resulting image to be a gradient of red and blue. In EffectCamera.OnRenderImage() I check the returned texture, but many of the pixels have the Camera's background color, which confuses me.
using UnityEngine;
using System.Collections;

public class EffectCamera : MonoBehaviour {

    public Vector2 dimensions;
    public Camera cam;

    // Use this for initialization
    void Start () {
    }

    // Update is called once per frame
    void Update () {
    }

    void OnRenderImage(RenderTexture src , RenderTexture dst)
    {
        Texture2D tex2D = new Texture2D((int)dimensions.x , (int)dimensions.y ,
                                        TextureFormat.ARGB32 , false);
        tex2D.ReadPixels(new Rect(0 , 0 , dimensions.x , dimensions.y) , 0 , 0);
        Color[] colors = tex2D.GetPixels ();
        int dummy = 0; //breakpoint target: inspect colors in the debugger
    }
}
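If the RenderTexture uses a floating point format, the readback can be made explicit about its source and use a matching Texture2D format. A sketch of that variant (whether RGBAFloat readback works depends on the platform, so treat the format as an assumption):

using UnityEngine;

public class EffectReadback : MonoBehaviour
{
    void OnRenderImage(RenderTexture src , RenderTexture dst)
    {
        //Make sure ReadPixels reads from the texture this camera just rendered
        RenderTexture.active = src;
        Texture2D tex2D = new Texture2D(src.width , src.height , TextureFormat.RGBAFloat , false);
        tex2D.ReadPixels(new Rect(0 , 0 , src.width , src.height) , 0 , 0);
        Color[] colors = tex2D.GetPixels();  //inspect in the debugger
        RenderTexture.active = null;
        Graphics.Blit(src , dst);            //keep the image effect chain intact
    }
}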
Any help would be greatly appreciated.
Answer by dsilvavini · Feb 11, 2013 at 04:09 AM
Solved the problem. Here's the code to initialize an X*X pixel plane using an orthographic camera (cam in the snippet). The camera's orthographic size must be set to dimensions.x/2, and it must render into a dimensions.x*dimensions.y RenderTexture (set in the inspector).
using UnityEngine;
using System.Collections;

public class EffectScreenQuad : MonoBehaviour
{
    public Vector2 dimensions; //dimensions.x == dimensions.y
    public Camera cam;

    // Use this for initialization
    void Start ()
    {
        Vector3[] vertices = new Vector3[4];
        Vector3[] normals = new Vector3[4];
        Vector2[] uv = new Vector2[4];
        int[] triangles = new int[6];

        //Create plane vertices that fill a dimensions.x*dimensions.y pixels rect
        Vector3 lowerLeftPlaneScreenPos = new Vector3(0 , 0 , 0);
        Vector3 lowerRightPlaneScreenPos = new Vector3(dimensions.x , 0 , 0);
        Vector3 upperLeftPlaneScreenPos = new Vector3(0 , dimensions.y , 0);
        Vector3 lowerLeftPlaneWorldPos = cam.ScreenToWorldPoint(lowerLeftPlaneScreenPos);
        Vector3 lowerRightPlaneWorldPos = cam.ScreenToWorldPoint(lowerRightPlaneScreenPos);
        Vector3 upperLeftPlaneWorldPos = cam.ScreenToWorldPoint(upperLeftPlaneScreenPos);
        Vector3 heightDelta = (upperLeftPlaneWorldPos - lowerLeftPlaneWorldPos)/2;
        Vector3 widthDelta = (lowerRightPlaneWorldPos - lowerLeftPlaneWorldPos)/2;

        vertices[0] = lowerLeftPlaneWorldPos;
        vertices[1] = lowerRightPlaneWorldPos;
        vertices[2] = upperLeftPlaneWorldPos;
        vertices[3] = heightDelta + widthDelta;

        Vector3 normal = Vector3.Cross(widthDelta , heightDelta);
        normal.Normalize();
        for (int i = 0; i < 4; i++)
            normals[i] = normal;

        uv[0] = Vector2.zero;
        uv[1] = Vector2.right;
        uv[2] = Vector2.up;
        uv[3] = Vector2.right + Vector2.up;

        //Unity3D culls counterclockwise faces...
        triangles[0] = 0;
        triangles[1] = 2;
        triangles[2] = 1;
        triangles[3] = 1;
        triangles[4] = 2;
        triangles[5] = 3;

        MeshFilter mesh_filter = gameObject.GetComponent<MeshFilter>();
        mesh_filter.mesh = new Mesh();
        mesh_filter.mesh.vertices = vertices;
        mesh_filter.mesh.normals = normals;
        mesh_filter.mesh.uv = uv;
        mesh_filter.mesh.triangles = triangles;
    }

    // Update is called once per frame
    void Update () {
    }
}
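For reference, here are the camera constraints described above expressed in code, as a sketch of what the inspector setup amounts to (the rt field is just an illustrative reference to the RenderTexture asset):

using UnityEngine;

public class EffectCameraSetup : MonoBehaviour
{
    public Vector2 dimensions;   //same values as on EffectScreenQuad, dimensions.x == dimensions.y
    public Camera cam;           //the orthographic camera rendering the quad
    public RenderTexture rt;     //a dimensions.x*dimensions.y RenderTexture

    void Start ()
    {
        cam.orthographic = true;
        cam.orthographicSize = dimensions.x / 2f;  //orthographicSize is half the view height in world units
        cam.targetTexture = rt;                    //render into the texture instead of the screen
    }
}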
And this is the shader (currently returning just the UV coords):
Shader "clothInitPos" {
Properties {
_posTexSize("_posTexSize",Float)=16
_posOffset("_posOffset",Float)=5
}
SubShader {
Tags { "RenderType"="Opaque" }
Pass
{
//First pass initializes position texture. To ensure all vertices being generated, a quad must be
//drawn that fills _posTexSize*_posTexSize pixels.
CGPROGRAM
#pragma target 3.0
#pragma vertex vert
#pragma fragment frag
uniform float _posTexSize;
uniform float _posOffset;
struct v2f {
float4 pos:POSITION;
float2 uv:TEXCOORD0;
};
v2f vert(v2f IN)
{
v2f toFrag;
toFrag.pos = mul(UNITY_MATRIX_MVP, IN.pos);
toFrag.uv = IN.uv;
return toFrag;
}
float4 frag(v2f fromVert):COLOR
{
//Generate a regular grid by scaling uv coords by the number of verts and adding the position
//offset
//float2 scaledUV = fromVert.uv*_posTexSize*_posOffset;
//return float4(scaledUV.x , 0.0f , scaledUV.y , 1.0f); //Grid in the y=0 plane. Alpha unused.
return float4(fromVert.uv.x , 0.0f , fromVert.uv.y , 1.0f);
}
ENDCG
}
}
FallBack "Diffuse"
}
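Once the commented-out lines are switched back in, the grid positions the shader writes can be cross-checked against the same math done on the CPU. A sketch, under the same half-texel UV assumption as above:

using UnityEngine;

public static class ClothInitPosCheck
{
    //CPU mirror of the commented-out fragment math: scaledUV = uv * _posTexSize * _posOffset,
    //stored as float4(scaledUV.x, 0, scaledUV.y, 1), i.e. a grid in the y=0 plane.
    public static Vector3 ExpectedGridPosition(int x , int y , float posTexSize , float posOffset)
    {
        float u = (x + 0.5f) / posTexSize;
        float v = (y + 0.5f) / posTexSize;
        return new Vector3(u * posTexSize * posOffset , 0f , v * posTexSize * posOffset);
    }
}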