CSharpGL(25)一个用raycast实现体渲染VolumeRender的例子
本文涉及的VolumeRendering相关的C#代码是从(https://github.com/toolchainX/Volume_Rendering_Using_GLSL)的C++代码转换来的。
效果图
下载
CSharpGL已在GitHub开源,欢迎对OpenGL有兴趣的同学加入(https://github.com/bitzhuwei/CSharpGL)
实现思路
raycast
用一个3D纹理存储整个模型数据。
如下图所示,从画布上的一个像素点出发,垂直于画布的方向上,一条射线穿过3D纹理,每隔一小段距离采样一次颜色,累加起来就是此像素点应有的颜色。
起始点/终点
从射线接触3D纹理的第一个点开始(起始点),到射线离开3D纹理的位置(终点),这段距离就是要采样的范围。
终点
那么如何获取终点的位置?
办法是:渲染一个恰好包围3D纹理的立方体,且只渲染此立方体的背面(用glCullFace(GL_FRONT);)。因为背面就是终点啊。另外,要把这个渲染结果弄到一个2D纹理上。这就需要一个与画布大小相同的2D纹理来记录终点的位置,即需要一个新的FrameBuffer。(详情可参考http://www.opengl-tutorial.org/intermediate-tutorials/tutorial-14-render-to-texture,以后有时间我会把这个翻译一下)
渲染背面的shader非常简单。
// for raycasting
#version 330 core

layout(location = 0) in vec3 position;
layout(location = 1) in vec3 color;

out vec3 passColor;

uniform mat4 MVP;

void main()
{
    // The per-vertex "color" equals the vertex's coordinate inside the unit
    // cube; rasterized on the back faces it encodes each ray's exit point.
    passColor = color;
    gl_Position = MVP * vec4(position, 1.0);
}
backface.vert
// for raycasting
#version 330 core

in vec3 passColor;

layout(location = 0) out vec4 FragColor;

void main()
{
    // Write the interpolated cube coordinate out as a color; the FBO's 2D
    // texture then stores the ray exit point for every screen pixel.
    FragColor = vec4(passColor, 1.0);
}
backface.frag
渲染时则需要注意启用新的framebuffer。这样就会渲染到指定的2D纹理上。
// render to texture: redirect the backface pass into our FBO so the
// ray exit points land in the attached 2D texture instead of the screen
glBindFramebufferEXT(OpenGL.GL_FRAMEBUFFER_EXT, frameBuffer[0]);
OpenGL.Clear(OpenGL.GL_COLOR_BUFFER_BIT | OpenGL.GL_DEPTH_BUFFER_BIT);
this.backfaceRenderer.Render(arg);
// restore the default framebuffer (0 = the window's own back buffer)
glBindFramebufferEXT(OpenGL.GL_FRAMEBUFFER_EXT, 0);
注意,上图中左侧是立方体的前面,右侧是立方体的背面。
起始点/raycast
终点有了,起点也就知道怎么找了:渲染一个恰好包围3D纹理的立方体,且只渲染此立方体的前面(用glCullFace(GL_BACK);)。
起始点和终点都有了,就可以通过累加颜色来计算一个像素点的颜色值了。
#version 330 core

in vec3 EntryPoint;
in vec4 ExitPointCoord;

uniform sampler2D ExitPoints;   // 2D texture holding the ray exit points (backface pass)
uniform sampler3D VolumeTex;    // 3D texture holding the volume data
uniform sampler1D TransferFunc; // 1D lookup table: intensity -> RGBA
uniform float StepSize = 0.001f;
uniform vec2 ScreenSize;
uniform vec4 backgroundColor = vec4(0.0, 0.0, 0.0, 0.0); // value in glClearColor(value);

layout(location = 0) out vec4 FragColor;

void main()
{
    // ExitPointCoord is normalized device coordinate
    vec3 exitPoint = texture(ExitPoints, gl_FragCoord.st / ScreenSize).xyz;
    // that will actually give you clip-space coordinates rather than
    // normalised device coordinates, since you're not performing the perspective
    // division which happens during the rasterisation process (between the vertex
    // shader and fragment shader)
    // vec2 exitFragCoord = (ExitPointCoord.xy / ExitPointCoord.w + 1.0) / 2.0;
    // vec3 exitPoint = texture(ExitPoints, exitFragCoord).xyz;

    // background needs no raycasting
    if (EntryPoint == exitPoint) { discard; }

    vec3 direction = exitPoint - EntryPoint;
    // the length from front to back is calculated and used to terminate the ray
    float directionLength = length(direction);
    vec3 deltaDirection = direction * (StepSize / directionLength);
    vec3 voxelCoord = EntryPoint;
    vec3 colorAccumulator = vec3(0.0); // the dest color
    float alphaAccumulator = 0.0f;
    float lengthAccumulator = 0.0;
    float intensity;
    vec4 colorSample; // the src color

    // 1600 iterations at StepSize = 0.001 is enough to traverse the unit cube's diagonal
    for (int i = 0; i < 1600; i++)
    {
        // get scalar value in the volume data
        intensity = texture(VolumeTex, voxelCoord).x;
        // get mapped color from 1-D texture
        colorSample = texture(TransferFunc, intensity);
        // modulate the value of colorSample.a
        // front-to-back integration
        if (colorSample.a > 0.0) {
            // accommodate for variable sampling rates (base interval defined by mod_compositing.frag)
            colorSample.a = 1.0 - pow(1.0 - colorSample.a, StepSize * 200.0f);
            colorAccumulator += (1.0 - alphaAccumulator) * colorSample.rgb * colorSample.a;
            alphaAccumulator += (1.0 - alphaAccumulator) * colorSample.a;
        }
        voxelCoord += deltaDirection;
        lengthAccumulator += StepSize;
        if (lengthAccumulator >= directionLength)
        {
            // ray left the volume: blend the remainder with the background color
            colorAccumulator = colorAccumulator * alphaAccumulator
                + (1.0 - alphaAccumulator) * backgroundColor.rgb;
            break; // terminate if opacity > 1 or the ray is outside the volume
        }
        else if (alphaAccumulator > 1.0)
        {
            alphaAccumulator = 1.0;
            break;
        }
    }
    FragColor = vec4(colorAccumulator, alphaAccumulator);
}
raycast.frag
Raycast所需的vertex shader和backface.vert几乎一样。
#version 330 core

layout(location = 0) in vec3 position;
// have to use this variable!!!, or it will be very hard to debug for AMD video card
layout(location = 1) in vec3 color;

out vec3 EntryPoint;
out vec4 ExitPointCoord;

uniform mat4 MVP;

void main()
{
    // front faces: the vertex's cube coordinate is the ray's entry point
    EntryPoint = color;
    gl_Position = MVP * vec4(position, 1.0);
    // pass the clip-space position along (see the note in raycast.frag)
    ExitPointCoord = gl_Position;
}
raycast.vert
而(在CSharpGL中)所需的渲染指令也只需一句话。
// one call renders the cube's front faces with the raycasting shader,
// which performs the whole per-pixel volume integration
this.raycastRenderer.Render(arg);
Miscellaneous
在实现上述过程之前,必须初始化很多东西:3D纹理,附带了2D纹理的frameBuffer,用于渲染背面的shader和立方体模型,用于渲染正面/raycast的shader和立方体模型,从float类型的intensity值到vec3类型的颜色值的转换功能(1D纹理),设置uniform变量。
protected override void DoInitialize()
{
    InitBackfaceRenderer();
    InitRaycastRenderer();
    initTFF1DTex(@"10RaycastVolumeRender\tff.dat");
    // viewport = { x, y, width, height }
    int[] viewport = OpenGL.GetViewport();
    initFace2DTex(viewport[2], viewport[3]);
    // head256.raw is a 256 x 256 x 225 8-bit raw volume -- TODO confirm depth against the data file
    initVol3DTex(@"10RaycastVolumeRender\head256.raw", 256, 256, 225);
    initFrameBuffer(viewport[2], viewport[3]);
    //this.depthTest = new DepthTestSwitch();
    // NOTE(review): in the garbled original this call was trapped behind the
    // "//" comment above and never executed; it must run to set the uniforms.
    RaycastingSetupUniforms();
}

private void RaycastingSetupUniforms()
{
    // setting uniforms such as
    // ScreenSize
    // StepSize
    // TransferFunc
    // ExitPoints i.e. the backface, the backface holds the ExitPoints of ray casting
    // VolumeTex the texture that holds the volume data i.e. head256.raw
    int[] viewport = OpenGL.GetViewport();
    this.raycastRenderer.SetUniform("ScreenSize", new vec2(viewport[2], viewport[3]));
    this.raycastRenderer.SetUniform("StepSize", g_stepSize);
    // each sampler is bound to its own texture unit; sharing one unit
    // makes the volume invisible on some video cards
    this.raycastRenderer.SetUniform("TransferFunc", new samplerValue(BindTextureTarget.Texture1D, transferFunc1DTexObj[0], OpenGL.GL_TEXTURE0));
    this.raycastRenderer.SetUniform("ExitPoints", new samplerValue(BindTextureTarget.Texture2D, backface2DTexObj[0], OpenGL.GL_TEXTURE1));
    this.raycastRenderer.SetUniform("VolumeTex", new samplerValue(BindTextureTarget.Texture3D, vol3DTexObj[0], OpenGL.GL_TEXTURE2));
    // mirror the current clear color into the shader's background color
    var clearColor = new float[4];
    OpenGL.GetFloat(GetTarget.ColorClearValue, clearColor);
    this.raycastRenderer.SetUniform("backgroundColor", clearColor.ToVec4());
}

/// <summary>
/// Creates the FBO the backface pass renders into:
/// color attachment = backface2DTexObj[0] (the exit-point texture),
/// depth attachment = a freshly created renderbuffer of the same size.
/// </summary>
private void initFrameBuffer(int texWidth, int texHeight)
{
    // create a depth buffer for our framebuffer
    var depthBuffer = new uint[1];
    OpenGL.GetDelegateFor<OpenGL.glGenRenderbuffersEXT>()(1, depthBuffer);
    OpenGL.GetDelegateFor<OpenGL.glBindRenderbufferEXT>()(OpenGL.GL_RENDERBUFFER, depthBuffer[0]);
    OpenGL.GetDelegateFor<OpenGL.glRenderbufferStorageEXT>()(OpenGL.GL_RENDERBUFFER, OpenGL.GL_DEPTH_COMPONENT, texWidth, texHeight);
    // attach the texture and the depth buffer to the framebuffer
    OpenGL.GetDelegateFor<OpenGL.glGenFramebuffersEXT>()(1, frameBuffer);
    OpenGL.GetDelegateFor<OpenGL.glBindFramebufferEXT>()(OpenGL.GL_FRAMEBUFFER_EXT, frameBuffer[0]);
    OpenGL.GetDelegateFor<OpenGL.glFramebufferTexture2DEXT>()(OpenGL.GL_FRAMEBUFFER_EXT, OpenGL.GL_COLOR_ATTACHMENT0_EXT, OpenGL.GL_TEXTURE_2D, backface2DTexObj[0], 0);
    OpenGL.GetDelegateFor<OpenGL.glFramebufferRenderbufferEXT>()(OpenGL.GL_FRAMEBUFFER_EXT, OpenGL.GL_DEPTH_ATTACHMENT_EXT, OpenGL.GL_RENDERBUFFER, depthBuffer[0]);
    checkFramebufferStatus();
    //OpenGL.Enable(GL_DEPTH_TEST);
}

/// <summary>
/// Throws if the currently bound framebuffer is not complete.
/// </summary>
/// <exception cref="InvalidOperationException">framebuffer incomplete.</exception>
private void checkFramebufferStatus()
{
    uint complete = OpenGL.GetDelegateFor<OpenGL.glCheckFramebufferStatusEXT>()(OpenGL.GL_FRAMEBUFFER_EXT);
    if (complete != OpenGL.GL_FRAMEBUFFER_COMPLETE_EXT)
    {
        // include the status code so the failure reason is diagnosable
        throw new InvalidOperationException(
            string.Format("framebuffer is not complete (status: 0x{0:X})", complete));
    }
}

/// <summary>
/// Loads a raw 8-bit volume file into a GL_TEXTURE_3D object (vol3DTexObj[0]).
/// </summary>
/// <param name="filename">path of the raw volume data file.</param>
/// <param name="width">volume width in voxels.</param>
/// <param name="height">volume height in voxels.</param>
/// <param name="depth">volume depth in voxels.</param>
private void initVol3DTex(string filename, int width, int height, int depth)
{
    var data = new UnmanagedArray<byte>(width * height * depth);
    unsafe
    {
        int index = 0;
        int readCount = 0;
        byte* array = (byte*)data.Header.ToPointer();
        using (var fs = new FileStream(filename, FileMode.Open, FileAccess.Read))
        using (var br = new BinaryReader(fs))
        {
            int unReadCount = (int)fs.Length;
            // copy the file into unmanaged memory in 64 KB chunks
            const int cacheSize = 64 * 1024;
            do
            {
                int min = Math.Min(cacheSize, unReadCount);
                var cache = new byte[min];
                readCount = br.Read(cache, 0, min);
                if (readCount != min)
                {
                    throw new IOException(
                        string.Format("unexpected end of volume file: {0}", filename));
                }
                for (int i = 0; i < readCount; i++)
                {
                    array[index++] = cache[i];
                }
                unReadCount -= readCount;
            } while (readCount > 0);
        }
    }
    OpenGL.GenTextures(1, vol3DTexObj);
    // bind 3D texture target
    OpenGL.BindTexture(OpenGL.GL_TEXTURE_3D, vol3DTexObj[0]);
    OpenGL.TexParameteri(OpenGL.GL_TEXTURE_3D, OpenGL.GL_TEXTURE_MAG_FILTER, (int)OpenGL.GL_LINEAR);
    OpenGL.TexParameteri(OpenGL.GL_TEXTURE_3D, OpenGL.GL_TEXTURE_MIN_FILTER, (int)OpenGL.GL_LINEAR);
    OpenGL.TexParameteri(OpenGL.GL_TEXTURE_3D, OpenGL.GL_TEXTURE_WRAP_S, (int)OpenGL.GL_REPEAT);
    OpenGL.TexParameteri(OpenGL.GL_TEXTURE_3D, OpenGL.GL_TEXTURE_WRAP_T, (int)OpenGL.GL_REPEAT);
    OpenGL.TexParameteri(OpenGL.GL_TEXTURE_3D, OpenGL.GL_TEXTURE_WRAP_R, (int)OpenGL.GL_REPEAT);
    // pixel transfer happens here from client to OpenGL server;
    // rows are tightly packed single bytes, so alignment must be 1
    OpenGL.PixelStorei(OpenGL.GL_UNPACK_ALIGNMENT, 1);
    OpenGL.TexImage3D(OpenGL.GL_TEXTURE_3D, 0, (int)OpenGL.GL_INTENSITY,
        width, height, depth, 0,
        OpenGL.GL_LUMINANCE, OpenGL.GL_UNSIGNED_BYTE, data.Header);
    data.Dispose();
}

/// <summary>
/// Creates the screen-sized 2D texture (backface2DTexObj[0]) that will
/// receive the backface pass, i.e. the ray exit points.
/// GL_RGBA16F keeps the coordinates at floating-point precision.
/// </summary>
private void initFace2DTex(int width, int height)
{
    OpenGL.GenTextures(1, backface2DTexObj);
    OpenGL.BindTexture(OpenGL.GL_TEXTURE_2D, backface2DTexObj[0]);
    OpenGL.TexParameteri(OpenGL.GL_TEXTURE_2D, OpenGL.GL_TEXTURE_WRAP_S, (int)OpenGL.GL_REPEAT);
    OpenGL.TexParameteri(OpenGL.GL_TEXTURE_2D, OpenGL.GL_TEXTURE_WRAP_T, (int)OpenGL.GL_REPEAT);
    OpenGL.TexParameteri(OpenGL.GL_TEXTURE_2D, OpenGL.GL_TEXTURE_MIN_FILTER, (int)OpenGL.GL_NEAREST);
    OpenGL.TexParameteri(OpenGL.GL_TEXTURE_2D, OpenGL.GL_TEXTURE_MAG_FILTER, (int)OpenGL.GL_NEAREST);
    // no client data: the texture is filled by rendering into the FBO later
    OpenGL.TexImage2D(OpenGL.GL_TEXTURE_2D, 0, OpenGL.GL_RGBA16F, width, height, 0, OpenGL.GL_RGBA, OpenGL.GL_FLOAT, IntPtr.Zero);
}

/// <summary>
/// Loads the user-defined transfer function (a 256-texel RGBA table that
/// maps intensity to color) into a GL_TEXTURE_1D object (transferFunc1DTexObj[0]).
/// </summary>
/// <param name="filename">path of the transfer-function data file (tff.dat).</param>
private void initTFF1DTex(string filename)
{
    // read in the user defined data of transfer function
    byte[] tff;
    using (var fs = new FileStream(filename, FileMode.Open, FileAccess.Read))
    using (var br = new BinaryReader(fs))
    {
        tff = br.ReadBytes((int)fs.Length);
    }
    OpenGL.GenTextures(1, transferFunc1DTexObj);
    OpenGL.BindTexture(OpenGL.GL_TEXTURE_1D, transferFunc1DTexObj[0]);
    OpenGL.TexParameteri(OpenGL.GL_TEXTURE_1D, OpenGL.GL_TEXTURE_WRAP_S, (int)OpenGL.GL_REPEAT);
    OpenGL.TexParameteri(OpenGL.GL_TEXTURE_1D, OpenGL.GL_TEXTURE_MIN_FILTER, (int)OpenGL.GL_NEAREST);
    OpenGL.TexParameteri(OpenGL.GL_TEXTURE_1D, OpenGL.GL_TEXTURE_MAG_FILTER, (int)OpenGL.GL_NEAREST);
    OpenGL.PixelStorei(OpenGL.GL_UNPACK_ALIGNMENT, 1);
    // 256 RGBA entries, one per possible 8-bit intensity value
    OpenGL.TexImage1D(OpenGL.GL_TEXTURE_1D, 0, OpenGL.GL_RGBA8, 256, 0, OpenGL.GL_RGBA, OpenGL.GL_UNSIGNED_BYTE, tff);
}

/// <summary>
/// Builds the renderer that draws the cube's FRONT faces with the
/// raycasting shader (back faces are culled), performing the actual
/// volume rendering.
/// </summary>
private void InitRaycastRenderer()
{
    var shaderCodes = new ShaderCode[2];
    shaderCodes[0] = new ShaderCode(File.ReadAllText(@"10RaycastVolumeRender\raycasting.vert"), ShaderType.VertexShader);
    shaderCodes[1] = new ShaderCode(File.ReadAllText(@"10RaycastVolumeRender\raycasting.frag"), ShaderType.FragmentShader);
    var map = new PropertyNameMap();
    map.Add("position", "position");
    map.Add("color", "color");
    this.raycastRenderer = new Renderer(model, shaderCodes, map);
    this.raycastRenderer.Initialize();
    // cull back faces -> only the front faces (ray entry points) are drawn
    this.raycastRenderer.SwitchList.Add(new CullFaceSwitch(CullFaceMode.Back, true));
}

/// <summary>
/// Builds the renderer that draws the cube's BACK faces into the FBO
/// (front faces are culled), recording the ray exit points.
/// </summary>
private void InitBackfaceRenderer()
{
    var shaderCodes = new ShaderCode[2];
    shaderCodes[0] = new ShaderCode(File.ReadAllText(@"10RaycastVolumeRender\backface.vert"), ShaderType.VertexShader);
    shaderCodes[1] = new ShaderCode(File.ReadAllText(@"10RaycastVolumeRender\backface.frag"), ShaderType.FragmentShader);
    var map = new PropertyNameMap();
    map.Add("position", "position");
    map.Add("color", "color");
    this.backfaceRenderer = new Renderer(model, shaderCodes, map);
    this.backfaceRenderer.Initialize();
    // cull front faces -> only the back faces (ray exit points) are drawn
    this.backfaceRenderer.SwitchList.Add(new CullFaceSwitch(CullFaceMode.Front, true));
}
Initialize
2018-01-16
最近终于解决了在某些电脑上不显示体渲染(volume rendering)结果的情况。原来是我没有把3个Texture分别绑定到不同的纹理单元(Texture Unit)上。
总结
当然,也可以先渲染出起始点,然后再找到终点的时候计算各个像素点的颜色值。
raycast做volume rendering的这个例子中,最耗空间的是3D纹理。但是这是无法避免的。其他空间和时间耗费都是极少的。
欢迎对OpenGL有兴趣的同学关注(https://github.com/bitzhuwei/CSharpGL)