添加光照、全局等高线、修复图层问题

This commit is contained in:
2025-07-17 18:54:05 +08:00
parent c781d38c0c
commit b274b62671
4594 changed files with 791769 additions and 4921 deletions

View File

@ -0,0 +1,89 @@
/**
 * ACES Filmic Tone Mapping Shader by Stephen Hill
 * source: https://github.com/selfshadow/ltc_code/blob/master/webgl/shaders/ltc/ltc_blit.fs
 *
 * this implementation of ACES is modified to accommodate a brighter viewing environment.
 * the scale factor of 1/0.6 is subjective. see discussion in #19621.
 */
// Full-screen post-processing pass: samples the rendered frame from `tDiffuse`,
// pre-exposes it, and applies the ACES filmic tone-mapping fit (RRT + ODT).
const ACESFilmicToneMappingShader = {
// pass name, used by the post-processing framework for identification
name: 'ACESFilmicToneMappingShader',
uniforms: {
// input render-target texture to be tone mapped
'tDiffuse': { value: null },
// linear exposure multiplier applied before the tone-mapping curve
'exposure': { value: 1.0 }
},
// Pass-through vertex shader for the full-screen quad; forwards UVs.
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
// Fragment shader: RRTAndODTFit is the rational-polynomial fit of the ACES
// RRT+ODT; the two const mat3s convert sRGB <-> the ACES AP1 working space.
fragmentShader: /* glsl */`
#define saturate(a) clamp( a, 0.0, 1.0 )
uniform sampler2D tDiffuse;
uniform float exposure;
varying vec2 vUv;
vec3 RRTAndODTFit( vec3 v ) {
vec3 a = v * ( v + 0.0245786 ) - 0.000090537;
vec3 b = v * ( 0.983729 * v + 0.4329510 ) + 0.238081;
return a / b;
}
vec3 ACESFilmicToneMapping( vec3 color ) {
// sRGB => XYZ => D65_2_D60 => AP1 => RRT_SAT
const mat3 ACESInputMat = mat3(
vec3( 0.59719, 0.07600, 0.02840 ), // transposed from source
vec3( 0.35458, 0.90834, 0.13383 ),
vec3( 0.04823, 0.01566, 0.83777 )
);
// ODT_SAT => XYZ => D60_2_D65 => sRGB
const mat3 ACESOutputMat = mat3(
vec3( 1.60475, -0.10208, -0.00327 ), // transposed from source
vec3( -0.53108, 1.10813, -0.07276 ),
vec3( -0.07367, -0.00605, 1.07602 )
);
color = ACESInputMat * color;
// Apply RRT and ODT
color = RRTAndODTFit( color );
color = ACESOutputMat * color;
// Clamp to [0, 1]
return saturate( color );
}
void main() {
vec4 tex = texture2D( tDiffuse, vUv );
tex.rgb *= exposure / 0.6; // pre-exposed, outside of the tone mapping function
gl_FragColor = vec4( ACESFilmicToneMapping( tex.rgb ), tex.a );
}`
};
export { ACESFilmicToneMappingShader };

View File

@ -0,0 +1,58 @@
/**
 * Afterimage ("motion trail") post-processing shader.
 *
 * Blends the previous accumulated frame (tOld) into the freshly rendered
 * frame (tNew): old texels brighter than 0.1 are kept, attenuated by `damp`,
 * and combined with the new frame via max(), producing a decaying ghost trail.
 * Inspired by https://codepen.io/brunoimbrizi/pen/MoRJaN?page=1&
 */
const AFTERIMAGE_VERTEX = /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`;
const AFTERIMAGE_FRAGMENT = /* glsl */`
uniform float damp;
uniform sampler2D tOld;
uniform sampler2D tNew;
varying vec2 vUv;
vec4 when_gt( vec4 x, float y ) {
return max( sign( x - y ), 0.0 );
}
void main() {
vec4 texelOld = texture2D( tOld, vUv );
vec4 texelNew = texture2D( tNew, vUv );
texelOld *= damp * when_gt( texelOld, 0.1 );
gl_FragColor = max(texelNew, texelOld);
}`;
const AfterimageShader = {
name: 'AfterimageShader',
uniforms: {
damp: { value: 0.96 }, // per-frame trail decay (closer to 1 = longer trail)
tOld: { value: null }, // accumulated previous frame
tNew: { value: null } // freshly rendered frame
},
vertexShader: AFTERIMAGE_VERTEX,
fragmentShader: AFTERIMAGE_FRAGMENT
};
export { AfterimageShader };

View File

@ -0,0 +1,29 @@
/**
 * Minimal diagnostic shader: ignores all input and fills the geometry with
 * half-transparent red. Handy as a sanity check for a render/compose chain.
 */
const BASIC_VERTEX = /* glsl */`
void main() {
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`;
const BASIC_FRAGMENT = /* glsl */`
void main() {
gl_FragColor = vec4( 1.0, 0.0, 0.0, 0.5 );
}`;
const BasicShader = {
name: 'BasicShader',
uniforms: {}, // intentionally none
vertexShader: BASIC_VERTEX,
fragmentShader: BASIC_FRAGMENT
};
export { BasicShader };

View File

@ -0,0 +1,62 @@
/**
 * Bleach bypass shader [http://en.wikipedia.org/wiki/Bleach_bypass]
 * - based on Nvidia example
 * http://developer.download.nvidia.com/shaderlibrary/webpages/shader_library.html#post_bleach_bypass
 *
 * Overlays a luminance image onto the source (multiply in shadows, screen in
 * highlights), desaturating and boosting contrast; `opacity` scales the effect.
 */
const BLEACH_BYPASS_VERTEX = /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`;
const BLEACH_BYPASS_FRAGMENT = /* glsl */`
uniform float opacity;
uniform sampler2D tDiffuse;
varying vec2 vUv;
void main() {
vec4 base = texture2D( tDiffuse, vUv );
vec3 lumCoeff = vec3( 0.25, 0.65, 0.1 );
float lum = dot( lumCoeff, base.rgb );
vec3 blend = vec3( lum );
float L = min( 1.0, max( 0.0, 10.0 * ( lum - 0.45 ) ) );
vec3 result1 = 2.0 * base.rgb * blend;
vec3 result2 = 1.0 - 2.0 * ( 1.0 - blend ) * ( 1.0 - base.rgb );
vec3 newColor = mix( result1, result2, L );
float A2 = opacity * base.a;
vec3 mixRGB = A2 * newColor.rgb;
mixRGB += ( ( 1.0 - A2 ) * base.rgb );
gl_FragColor = vec4( mixRGB, base.a );
}`;
const BleachBypassShader = {
name: 'BleachBypassShader',
uniforms: {
tDiffuse: { value: null }, // input render
opacity: { value: 1.0 } // effect strength (0 = passthrough, 1 = full)
},
vertexShader: BLEACH_BYPASS_VERTEX,
fragmentShader: BLEACH_BYPASS_FRAGMENT
};
export { BleachBypassShader };

View File

@ -0,0 +1,49 @@
/**
 * Blends two textures: result = opacity * mix( tDiffuse1, tDiffuse2, mixRatio ).
 */
const BLEND_VERTEX = /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`;
const BLEND_FRAGMENT = /* glsl */`
uniform float opacity;
uniform float mixRatio;
uniform sampler2D tDiffuse1;
uniform sampler2D tDiffuse2;
varying vec2 vUv;
void main() {
vec4 texel1 = texture2D( tDiffuse1, vUv );
vec4 texel2 = texture2D( tDiffuse2, vUv );
gl_FragColor = opacity * mix( texel1, texel2, mixRatio );
}`;
const BlendShader = {
name: 'BlendShader',
uniforms: {
tDiffuse1: { value: null }, // first input texture
tDiffuse2: { value: null }, // second input texture
mixRatio: { value: 0.5 }, // 0 = all tDiffuse1, 1 = all tDiffuse2
opacity: { value: 1.0 } // overall output scale
},
vertexShader: BLEND_VERTEX,
fragmentShader: BLEND_FRAGMENT
};
export { BlendShader };

View File

@ -0,0 +1,145 @@
/**
 * Depth-of-field shader with bokeh
 * ported from GLSL shader by Martins Upitis
 * http://artmartinsh.blogspot.com/2010/02/glsl-lens-blur-filter-with-bokeh.html
 *
 * Reads a depth texture to derive a per-pixel blur radius (circle of
 * confusion), then averages 41 taps arranged on three concentric rings
 * (scaled by 1.0 / 0.9 / 0.7 / 0.4) around each pixel.
 */
const BokehShader = {
name: 'BokehShader',
// DEPTH_PACKING: 1 = depth packed into RGBA texels, else raw depth in .x
// PERSPECTIVE_CAMERA: 1 = perspective depth reconstruction, else orthographic
defines: {
'DEPTH_PACKING': 1,
'PERSPECTIVE_CAMERA': 1,
},
uniforms: {
'tColor': { value: null }, // rendered scene color
'tDepth': { value: null }, // scene depth texture
'focus': { value: 1.0 }, // in-focus distance (view-space units)
'aspect': { value: 1.0 }, // viewport aspect ratio, corrects tap offsets
'aperture': { value: 0.025 }, // bigger values -> shallower depth of field
'maxblur': { value: 0.01 }, // clamp on blur offset (UV units)
'nearClip': { value: 1.0 },
'farClip': { value: 1000.0 },
},
// Pass-through vertex shader for the full-screen quad.
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
#include <common>
varying vec2 vUv;
uniform sampler2D tColor;
uniform sampler2D tDepth;
uniform float maxblur; // max blur amount
uniform float aperture; // aperture - bigger values for shallower depth of field
uniform float nearClip;
uniform float farClip;
uniform float focus;
uniform float aspect;
#include <packing>
float getDepth( const in vec2 screenPosition ) {
#if DEPTH_PACKING == 1
return unpackRGBAToDepth( texture2D( tDepth, screenPosition ) );
#else
return texture2D( tDepth, screenPosition ).x;
#endif
}
float getViewZ( const in float depth ) {
#if PERSPECTIVE_CAMERA == 1
return perspectiveDepthToViewZ( depth, nearClip, farClip );
#else
return orthographicDepthToViewZ( depth, nearClip, farClip );
#endif
}
void main() {
vec2 aspectcorrect = vec2( 1.0, aspect );
float viewZ = getViewZ( getDepth( vUv ) );
float factor = ( focus + viewZ ); // viewZ is <= 0, so this is a difference equation
vec2 dofblur = vec2 ( clamp( factor * aperture, -maxblur, maxblur ) );
vec2 dofblur9 = dofblur * 0.9;
vec2 dofblur7 = dofblur * 0.7;
vec2 dofblur4 = dofblur * 0.4;
vec4 col = vec4( 0.0 );
col += texture2D( tColor, vUv.xy );
col += texture2D( tColor, vUv.xy + ( vec2( 0.0, 0.4 ) * aspectcorrect ) * dofblur );
col += texture2D( tColor, vUv.xy + ( vec2( 0.15, 0.37 ) * aspectcorrect ) * dofblur );
col += texture2D( tColor, vUv.xy + ( vec2( 0.29, 0.29 ) * aspectcorrect ) * dofblur );
col += texture2D( tColor, vUv.xy + ( vec2( -0.37, 0.15 ) * aspectcorrect ) * dofblur );
col += texture2D( tColor, vUv.xy + ( vec2( 0.40, 0.0 ) * aspectcorrect ) * dofblur );
col += texture2D( tColor, vUv.xy + ( vec2( 0.37, -0.15 ) * aspectcorrect ) * dofblur );
col += texture2D( tColor, vUv.xy + ( vec2( 0.29, -0.29 ) * aspectcorrect ) * dofblur );
col += texture2D( tColor, vUv.xy + ( vec2( -0.15, -0.37 ) * aspectcorrect ) * dofblur );
col += texture2D( tColor, vUv.xy + ( vec2( 0.0, -0.4 ) * aspectcorrect ) * dofblur );
col += texture2D( tColor, vUv.xy + ( vec2( -0.15, 0.37 ) * aspectcorrect ) * dofblur );
col += texture2D( tColor, vUv.xy + ( vec2( -0.29, 0.29 ) * aspectcorrect ) * dofblur );
col += texture2D( tColor, vUv.xy + ( vec2( 0.37, 0.15 ) * aspectcorrect ) * dofblur );
col += texture2D( tColor, vUv.xy + ( vec2( -0.4, 0.0 ) * aspectcorrect ) * dofblur );
col += texture2D( tColor, vUv.xy + ( vec2( -0.37, -0.15 ) * aspectcorrect ) * dofblur );
col += texture2D( tColor, vUv.xy + ( vec2( -0.29, -0.29 ) * aspectcorrect ) * dofblur );
col += texture2D( tColor, vUv.xy + ( vec2( 0.15, -0.37 ) * aspectcorrect ) * dofblur );
col += texture2D( tColor, vUv.xy + ( vec2( 0.15, 0.37 ) * aspectcorrect ) * dofblur9 );
col += texture2D( tColor, vUv.xy + ( vec2( -0.37, 0.15 ) * aspectcorrect ) * dofblur9 );
col += texture2D( tColor, vUv.xy + ( vec2( 0.37, -0.15 ) * aspectcorrect ) * dofblur9 );
col += texture2D( tColor, vUv.xy + ( vec2( -0.15, -0.37 ) * aspectcorrect ) * dofblur9 );
col += texture2D( tColor, vUv.xy + ( vec2( -0.15, 0.37 ) * aspectcorrect ) * dofblur9 );
col += texture2D( tColor, vUv.xy + ( vec2( 0.37, 0.15 ) * aspectcorrect ) * dofblur9 );
col += texture2D( tColor, vUv.xy + ( vec2( -0.37, -0.15 ) * aspectcorrect ) * dofblur9 );
col += texture2D( tColor, vUv.xy + ( vec2( 0.15, -0.37 ) * aspectcorrect ) * dofblur9 );
col += texture2D( tColor, vUv.xy + ( vec2( 0.29, 0.29 ) * aspectcorrect ) * dofblur7 );
col += texture2D( tColor, vUv.xy + ( vec2( 0.40, 0.0 ) * aspectcorrect ) * dofblur7 );
col += texture2D( tColor, vUv.xy + ( vec2( 0.29, -0.29 ) * aspectcorrect ) * dofblur7 );
col += texture2D( tColor, vUv.xy + ( vec2( 0.0, -0.4 ) * aspectcorrect ) * dofblur7 );
col += texture2D( tColor, vUv.xy + ( vec2( -0.29, 0.29 ) * aspectcorrect ) * dofblur7 );
col += texture2D( tColor, vUv.xy + ( vec2( -0.4, 0.0 ) * aspectcorrect ) * dofblur7 );
col += texture2D( tColor, vUv.xy + ( vec2( -0.29, -0.29 ) * aspectcorrect ) * dofblur7 );
col += texture2D( tColor, vUv.xy + ( vec2( 0.0, 0.4 ) * aspectcorrect ) * dofblur7 );
col += texture2D( tColor, vUv.xy + ( vec2( 0.29, 0.29 ) * aspectcorrect ) * dofblur4 );
col += texture2D( tColor, vUv.xy + ( vec2( 0.4, 0.0 ) * aspectcorrect ) * dofblur4 );
col += texture2D( tColor, vUv.xy + ( vec2( 0.29, -0.29 ) * aspectcorrect ) * dofblur4 );
col += texture2D( tColor, vUv.xy + ( vec2( 0.0, -0.4 ) * aspectcorrect ) * dofblur4 );
col += texture2D( tColor, vUv.xy + ( vec2( -0.29, 0.29 ) * aspectcorrect ) * dofblur4 );
col += texture2D( tColor, vUv.xy + ( vec2( -0.4, 0.0 ) * aspectcorrect ) * dofblur4 );
col += texture2D( tColor, vUv.xy + ( vec2( -0.29, -0.29 ) * aspectcorrect ) * dofblur4 );
col += texture2D( tColor, vUv.xy + ( vec2( 0.0, 0.4 ) * aspectcorrect ) * dofblur4 );
gl_FragColor = col / 41.0;
gl_FragColor.a = 1.0;
}`
};
export { BokehShader };

View File

@ -0,0 +1,397 @@
import {
Vector2
} from 'three';
/**
 * Depth-of-field shader with bokeh
 * ported from GLSL shader by Martins Upitis
 * http://blenderartists.org/forum/showthread.php?237488-GLSL-depth-of-field-with-bokeh-v2-4-(update)
 *
 * Requires #define RINGS and SAMPLES integers
 *
 * FIX: in bdepth(), offset[2] was built as vec2( wh.x -wh.y ) — a single
 * scalar, which GLSL splats to vec2( wh.x - wh.y, wh.x - wh.y ) — skewing the
 * 3x3 depth-blur kernel. The intended top-right offset is vec2( wh.x, -wh.y ).
 */
const BokehShader = {
name: 'BokehShader',
uniforms: {
'textureWidth': { value: 1.0 }, // render-target width in pixels
'textureHeight': { value: 1.0 }, // render-target height in pixels
'focalDepth': { value: 1.0 }, // manual focal distance (meters), see shaderFocus
'focalLength': { value: 24.0 }, // lens focal length in mm
'fstop': { value: 0.9 },
'tColor': { value: null }, // scene color
'tDepth': { value: null }, // scene depth
'maxblur': { value: 1.0 },
'showFocus': { value: 0 }, // debug: visualize focal point/range
'manualdof': { value: 0 },
'vignetting': { value: 0 },
'depthblur': { value: 0 },
'threshold': { value: 0.5 },
'gain': { value: 2.0 },
'bias': { value: 0.5 },
'fringe': { value: 0.7 },
'znear': { value: 0.1 },
'zfar': { value: 100 },
'noise': { value: 1 },
'dithering': { value: 0.0001 },
'pentagon': { value: 0 },
'shaderFocus': { value: 1 },
'focusCoords': { value: new Vector2() }
},
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
#include <common>
varying vec2 vUv;
uniform sampler2D tColor;
uniform sampler2D tDepth;
uniform float textureWidth;
uniform float textureHeight;
uniform float focalDepth; //focal distance value in meters, but you may use autofocus option below
uniform float focalLength; //focal length in mm
uniform float fstop; //f-stop value
uniform bool showFocus; //show debug focus point and focal range (red = focal point, green = focal range)
/*
make sure that these two values are the same for your camera, otherwise distances will be wrong.
*/
uniform float znear; // camera clipping start
uniform float zfar; // camera clipping end
//------------------------------------------
//user variables
const int samples = SAMPLES; //samples on the first ring
const int rings = RINGS; //ring count
const int maxringsamples = rings * samples;
uniform bool manualdof; // manual dof calculation
float ndofstart = 1.0; // near dof blur start
float ndofdist = 2.0; // near dof blur falloff distance
float fdofstart = 1.0; // far dof blur start
float fdofdist = 3.0; // far dof blur falloff distance
float CoC = 0.03; //circle of confusion size in mm (35mm film = 0.03mm)
uniform bool vignetting; // use optical lens vignetting
float vignout = 1.3; // vignetting outer border
float vignin = 0.0; // vignetting inner border
float vignfade = 22.0; // f-stops till vignete fades
uniform bool shaderFocus;
// disable if you use external focalDepth value
uniform vec2 focusCoords;
// autofocus point on screen (0.0,0.0 - left lower corner, 1.0,1.0 - upper right)
// if center of screen use vec2(0.5, 0.5);
uniform float maxblur;
//clamp value of max blur (0.0 = no blur, 1.0 default)
uniform float threshold; // highlight threshold;
uniform float gain; // highlight gain;
uniform float bias; // bokeh edge bias
uniform float fringe; // bokeh chromatic aberration / fringing
uniform bool noise; //use noise instead of pattern for sample dithering
uniform float dithering;
uniform bool depthblur; // blur the depth buffer
float dbsize = 1.25; // depth blur size
/*
next part is experimental
not looking good with small sample and ring count
looks okay starting from samples = 4, rings = 4
*/
uniform bool pentagon; //use pentagon as bokeh shape?
float feather = 0.4; //pentagon shape feather
//------------------------------------------
float penta(vec2 coords) {
//pentagonal shape
float scale = float(rings) - 1.3;
vec4 HS0 = vec4( 1.0, 0.0, 0.0, 1.0);
vec4 HS1 = vec4( 0.309016994, 0.951056516, 0.0, 1.0);
vec4 HS2 = vec4(-0.809016994, 0.587785252, 0.0, 1.0);
vec4 HS3 = vec4(-0.809016994,-0.587785252, 0.0, 1.0);
vec4 HS4 = vec4( 0.309016994,-0.951056516, 0.0, 1.0);
vec4 HS5 = vec4( 0.0 ,0.0 , 1.0, 1.0);
vec4 one = vec4( 1.0 );
vec4 P = vec4((coords),vec2(scale, scale));
vec4 dist = vec4(0.0);
float inorout = -4.0;
dist.x = dot( P, HS0 );
dist.y = dot( P, HS1 );
dist.z = dot( P, HS2 );
dist.w = dot( P, HS3 );
dist = smoothstep( -feather, feather, dist );
inorout += dot( dist, one );
dist.x = dot( P, HS4 );
dist.y = HS5.w - abs( P.z );
dist = smoothstep( -feather, feather, dist );
inorout += dist.x;
return clamp( inorout, 0.0, 1.0 );
}
float bdepth(vec2 coords) {
// Depth buffer blur
float d = 0.0;
float kernel[9];
vec2 offset[9];
vec2 wh = vec2(1.0/textureWidth,1.0/textureHeight) * dbsize;
offset[0] = vec2(-wh.x,-wh.y);
offset[1] = vec2( 0.0, -wh.y);
offset[2] = vec2( wh.x, -wh.y);
offset[3] = vec2(-wh.x, 0.0);
offset[4] = vec2( 0.0, 0.0);
offset[5] = vec2( wh.x, 0.0);
offset[6] = vec2(-wh.x, wh.y);
offset[7] = vec2( 0.0, wh.y);
offset[8] = vec2( wh.x, wh.y);
kernel[0] = 1.0/16.0; kernel[1] = 2.0/16.0; kernel[2] = 1.0/16.0;
kernel[3] = 2.0/16.0; kernel[4] = 4.0/16.0; kernel[5] = 2.0/16.0;
kernel[6] = 1.0/16.0; kernel[7] = 2.0/16.0; kernel[8] = 1.0/16.0;
for( int i=0; i<9; i++ ) {
float tmp = texture2D(tDepth, coords + offset[i]).r;
d += tmp * kernel[i];
}
return d;
}
vec3 color(vec2 coords,float blur) {
//processing the sample
vec3 col = vec3(0.0);
vec2 texel = vec2(1.0/textureWidth,1.0/textureHeight);
col.r = texture2D(tColor,coords + vec2(0.0,1.0)*texel*fringe*blur).r;
col.g = texture2D(tColor,coords + vec2(-0.866,-0.5)*texel*fringe*blur).g;
col.b = texture2D(tColor,coords + vec2(0.866,-0.5)*texel*fringe*blur).b;
vec3 lumcoeff = vec3(0.299,0.587,0.114);
float lum = dot(col.rgb, lumcoeff);
float thresh = max((lum-threshold)*gain, 0.0);
return col+mix(vec3(0.0),col,thresh*blur);
}
vec3 debugFocus(vec3 col, float blur, float depth) {
float edge = 0.002*depth; //distance based edge smoothing
float m = clamp(smoothstep(0.0,edge,blur),0.0,1.0);
float e = clamp(smoothstep(1.0-edge,1.0,blur),0.0,1.0);
col = mix(col,vec3(1.0,0.5,0.0),(1.0-m)*0.6);
col = mix(col,vec3(0.0,0.5,1.0),((1.0-e)-(1.0-m))*0.2);
return col;
}
float linearize(float depth) {
return -zfar * znear / (depth * (zfar - znear) - zfar);
}
float vignette() {
float dist = distance(vUv.xy, vec2(0.5,0.5));
dist = smoothstep(vignout+(fstop/vignfade), vignin+(fstop/vignfade), dist);
return clamp(dist,0.0,1.0);
}
float gather(float i, float j, int ringsamples, inout vec3 col, float w, float h, float blur) {
float rings2 = float(rings);
float step = PI*2.0 / float(ringsamples);
float pw = cos(j*step)*i;
float ph = sin(j*step)*i;
float p = 1.0;
if (pentagon) {
p = penta(vec2(pw,ph));
}
col += color(vUv.xy + vec2(pw*w,ph*h), blur) * mix(1.0, i/rings2, bias) * p;
return 1.0 * mix(1.0, i /rings2, bias) * p;
}
void main() {
//scene depth calculation
float depth = linearize(texture2D(tDepth,vUv.xy).x);
// Blur depth?
if ( depthblur ) {
depth = linearize(bdepth(vUv.xy));
}
//focal plane calculation
float fDepth = focalDepth;
if (shaderFocus) {
fDepth = linearize(texture2D(tDepth,focusCoords).x);
}
// dof blur factor calculation
float blur = 0.0;
if (manualdof) {
float a = depth-fDepth; // Focal plane
float b = (a-fdofstart)/fdofdist; // Far DoF
float c = (-a-ndofstart)/ndofdist; // Near Dof
blur = (a>0.0) ? b : c;
} else {
float f = focalLength; // focal length in mm
float d = fDepth*1000.0; // focal plane in mm
float o = depth*1000.0; // depth in mm
float a = (o*f)/(o-f);
float b = (d*f)/(d-f);
float c = (d-f)/(d*fstop*CoC);
blur = abs(a-b)*c;
}
blur = clamp(blur,0.0,1.0);
// calculation of pattern for dithering
vec2 noise = vec2(rand(vUv.xy), rand( vUv.xy + vec2( 0.4, 0.6 ) ) )*dithering*blur;
// getting blur x and y step factor
float w = (1.0/textureWidth)*blur*maxblur+noise.x;
float h = (1.0/textureHeight)*blur*maxblur+noise.y;
// calculation of final color
vec3 col = vec3(0.0);
if(blur < 0.05) {
//some optimization thingy
col = texture2D(tColor, vUv.xy).rgb;
} else {
col = texture2D(tColor, vUv.xy).rgb;
float s = 1.0;
int ringsamples;
for (int i = 1; i <= rings; i++) {
/*unboxstart*/
ringsamples = i * samples;
for (int j = 0 ; j < maxringsamples ; j++) {
if (j >= ringsamples) break;
s += gather(float(i), float(j), ringsamples, col, w, h, blur);
}
/*unboxend*/
}
col /= s; //divide by sample count
}
if (showFocus) {
col = debugFocus(col, blur, depth);
}
if (vignetting) {
col *= vignette();
}
gl_FragColor.rgb = col;
gl_FragColor.a = 1.0;
#include <tonemapping_fragment>
#include <colorspace_fragment>
}`
};
// Companion depth pass for BokehShader: writes eye-space depth, remapped to
// [0,1] between mNear (-> 1.0, white) and mFar (-> 0.0, black), as grayscale.
const BOKEH_DEPTH_VERTEX = /* glsl */`
varying float vViewZDepth;
void main() {
#include <begin_vertex>
#include <project_vertex>
vViewZDepth = - mvPosition.z;
}`;
const BOKEH_DEPTH_FRAGMENT = /* glsl */`
uniform float mNear;
uniform float mFar;
varying float vViewZDepth;
void main() {
float color = 1.0 - smoothstep( mNear, mFar, vViewZDepth );
gl_FragColor = vec4( vec3( color ), 1.0 );
}`;
const BokehDepthShader = {
name: 'BokehDepthShader',
uniforms: {
mNear: { value: 1.0 }, // depth at which output saturates to white
mFar: { value: 1000.0 }, // depth at which output reaches black
},
vertexShader: BOKEH_DEPTH_VERTEX,
fragmentShader: BOKEH_DEPTH_FRAGMENT
};
export { BokehShader, BokehDepthShader };

View File

@ -0,0 +1,56 @@
/**
 * Brightness and contrast adjustment
 * https://github.com/evanw/glfx.js
 * brightness: -1 to 1 (-1 is solid black, 0 is no change, and 1 is solid white)
 * contrast: -1 to 1 (-1 is solid gray, 0 is no change, and 1 is maximum contrast)
 */
const BRIGHTNESS_CONTRAST_VERTEX = /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`;
const BRIGHTNESS_CONTRAST_FRAGMENT = /* glsl */`
uniform sampler2D tDiffuse;
uniform float brightness;
uniform float contrast;
varying vec2 vUv;
void main() {
gl_FragColor = texture2D( tDiffuse, vUv );
gl_FragColor.rgb += brightness;
if (contrast > 0.0) {
gl_FragColor.rgb = (gl_FragColor.rgb - 0.5) / (1.0 - contrast) + 0.5;
} else {
gl_FragColor.rgb = (gl_FragColor.rgb - 0.5) * (1.0 + contrast) + 0.5;
}
}`;
const BrightnessContrastShader = {
name: 'BrightnessContrastShader',
uniforms: {
tDiffuse: { value: null }, // input render
brightness: { value: 0 }, // additive offset
contrast: { value: 0 } // pivot-around-0.5 scale
},
vertexShader: BRIGHTNESS_CONTRAST_VERTEX,
fragmentShader: BRIGHTNESS_CONTRAST_FRAGMENT
};
export { BrightnessContrastShader };

View File

@ -0,0 +1,52 @@
import {
Vector3
} from 'three';
/**
 * Per-channel color correction:
 * out.rgb = mulRGB * pow( in.rgb + addRGB, powRGB )
 */
const COLOR_CORRECTION_VERTEX = /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`;
const COLOR_CORRECTION_FRAGMENT = /* glsl */`
uniform sampler2D tDiffuse;
uniform vec3 powRGB;
uniform vec3 mulRGB;
uniform vec3 addRGB;
varying vec2 vUv;
void main() {
gl_FragColor = texture2D( tDiffuse, vUv );
gl_FragColor.rgb = mulRGB * pow( ( gl_FragColor.rgb + addRGB ), powRGB );
}`;
const ColorCorrectionShader = {
name: 'ColorCorrectionShader',
uniforms: {
tDiffuse: { value: null }, // input render
powRGB: { value: new Vector3( 2, 2, 2 ) }, // per-channel exponent
mulRGB: { value: new Vector3( 1, 1, 1 ) }, // per-channel gain
addRGB: { value: new Vector3( 0, 0, 0 ) } // per-channel offset
},
vertexShader: COLOR_CORRECTION_VERTEX,
fragmentShader: COLOR_CORRECTION_FRAGMENT
};
export { ColorCorrectionShader };

View File

@ -0,0 +1,51 @@
import {
Color
} from 'three';
/**
 * Colorify shader: converts the input to luminance (Rec. 601 weights) and
 * tints it with the `color` uniform, preserving alpha.
 */
const COLORIFY_VERTEX = /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`;
const COLORIFY_FRAGMENT = /* glsl */`
uniform vec3 color;
uniform sampler2D tDiffuse;
varying vec2 vUv;
void main() {
vec4 texel = texture2D( tDiffuse, vUv );
vec3 luma = vec3( 0.299, 0.587, 0.114 );
float v = dot( texel.xyz, luma );
gl_FragColor = vec4( v * color, texel.w );
}`;
const ColorifyShader = {
name: 'ColorifyShader',
uniforms: {
tDiffuse: { value: null }, // input render
color: { value: new Color( 0xffffff ) } // tint color
},
vertexShader: COLORIFY_VERTEX,
fragmentShader: COLORIFY_FRAGMENT
};
export { ColorifyShader };

View File

@ -0,0 +1,103 @@
import {
Vector2
} from 'three';
/**
 * 1-D convolution shader, ported from an o3d sample to WebGL / GLSL.
 *
 * Applies a KERNEL_SIZE_INT-tap kernel along `uImageIncrement`; typically run
 * twice (horizontal then vertical) for a separable Gaussian blur.
 */
const ConvolutionShader = {
name: 'ConvolutionShader',
defines: {
KERNEL_SIZE_FLOAT: '25.0',
KERNEL_SIZE_INT: '25'
},
uniforms: {
tDiffuse: { value: null }, // input render
uImageIncrement: { value: new Vector2( 0.001953125, 0.0 ) }, // UV step between taps
cKernel: { value: [] } // kernel weights, see buildKernel()
},
vertexShader: /* glsl */`
uniform vec2 uImageIncrement;
varying vec2 vUv;
void main() {
vUv = uv - ( ( KERNEL_SIZE_FLOAT - 1.0 ) / 2.0 ) * uImageIncrement;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
uniform float cKernel[ KERNEL_SIZE_INT ];
uniform sampler2D tDiffuse;
uniform vec2 uImageIncrement;
varying vec2 vUv;
void main() {
vec2 imageCoord = vUv;
vec4 sum = vec4( 0.0, 0.0, 0.0, 0.0 );
for( int i = 0; i < KERNEL_SIZE_INT; i ++ ) {
sum += texture2D( tDiffuse, imageCoord ) * cKernel[ i ];
imageCoord += uImageIncrement;
}
gl_FragColor = sum;
}`,
// Builds a normalized 1-D Gaussian kernel for `sigma`, capped at 25 taps.
buildKernel( sigma ) {
const MAX_KERNEL_SIZE = 25;
const size = Math.min( MAX_KERNEL_SIZE, 2 * Math.ceil( sigma * 3.0 ) + 1 );
const center = ( size - 1 ) * 0.5;
const weights = Array.from( { length: size }, ( _, i ) => gauss( i - center, sigma ) );
const total = weights.reduce( ( acc, w ) => acc + w, 0 );
return weights.map( ( w ) => w / total );
}
};
// Unnormalized Gaussian; the sqrt(2 * pi) * sigma term is dropped since the
// kernel is normalized afterwards anyway.
function gauss( x, sigma ) {
return Math.exp( - ( x * x ) / ( 2.0 * sigma * sigma ) );
}
export { ConvolutionShader };

View File

@ -0,0 +1,45 @@
/**
 * Full-screen textured quad shader: copies `tDiffuse` to the output,
 * scaled by `opacity`.
 */
const COPY_VERTEX = /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`;
const COPY_FRAGMENT = /* glsl */`
uniform float opacity;
uniform sampler2D tDiffuse;
varying vec2 vUv;
void main() {
vec4 texel = texture2D( tDiffuse, vUv );
gl_FragColor = opacity * texel;
}`;
const CopyShader = {
name: 'CopyShader',
uniforms: {
tDiffuse: { value: null }, // texture to copy
opacity: { value: 1.0 } // output scale (applied to all channels)
},
vertexShader: COPY_VERTEX,
fragmentShader: COPY_FRAGMENT
};
export { CopyShader };

View File

@ -0,0 +1,56 @@
/**
 * Depth-of-field shader using mipmaps
 * - from Matt Handley @applmak
 * - requires power-of-2 sized render target with enabled mipmaps
 *
 * The depth delta from the focal plane is used as a texture LOD bias so
 * out-of-focus texels sample progressively blurrier mip levels.
 * (Removed the dead local `float factor` that was computed but never used.)
 */
const DOFMipMapShader = {
name: 'DOFMipMapShader',
uniforms: {
'tColor': { value: null }, // scene color (mipmapped render target)
'tDepth': { value: null }, // scene depth texture
'focus': { value: 1.0 }, // depth value that stays sharp
'maxblur': { value: 1.0 } // scales the mip LOD bias
},
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
uniform float focus;
uniform float maxblur;
uniform sampler2D tColor;
uniform sampler2D tDepth;
varying vec2 vUv;
void main() {
vec4 depth = texture2D( tDepth, vUv );
vec4 col = texture2D( tColor, vUv, 2.0 * maxblur * abs( focus - depth.x ) );
gl_FragColor = col;
gl_FragColor.a = 1.0;
}`
};
export { DOFMipMapShader };

View File

@ -0,0 +1,171 @@
import {
Vector2
} from 'three';
/**
 * Depth-limited (bilateral-style) separable blur.
 *
 * Blurs `tDiffuse` along precomputed UV offsets/weights, but stops
 * accumulating on either side of the kernel once a sample's view-space depth
 * differs from the center sample by more than `depthCutoff` — this keeps the
 * blur from bleeding across depth discontinuities.
 */
const DepthLimitedBlurShader = {
name: 'DepthLimitedBlurShader',
// KERNEL_RADIUS: taps per side; DEPTH_PACKING: 1 = RGBA-packed depth;
// PERSPECTIVE_CAMERA: 1 = perspective depth reconstruction, else orthographic
defines: {
'KERNEL_RADIUS': 4,
'DEPTH_PACKING': 1,
'PERSPECTIVE_CAMERA': 1
},
uniforms: {
'tDiffuse': { value: null }, // texture to blur
'size': { value: new Vector2( 512, 512 ) }, // render-target size in pixels
'sampleUvOffsets': { value: [ new Vector2( 0, 0 ) ] }, // see BlurShaderUtils.createSampleOffsets
'sampleWeights': { value: [ 1.0 ] }, // see BlurShaderUtils.createSampleWeights
'tDepth': { value: null }, // scene depth texture
'cameraNear': { value: 10 },
'cameraFar': { value: 1000 },
'depthCutoff': { value: 10 }, // max view-space depth delta allowed per sample
},
vertexShader: /* glsl */`
#include <common>
uniform vec2 size;
varying vec2 vUv;
varying vec2 vInvSize;
void main() {
vUv = uv;
vInvSize = 1.0 / size;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
#include <common>
#include <packing>
uniform sampler2D tDiffuse;
uniform sampler2D tDepth;
uniform float cameraNear;
uniform float cameraFar;
uniform float depthCutoff;
uniform vec2 sampleUvOffsets[ KERNEL_RADIUS + 1 ];
uniform float sampleWeights[ KERNEL_RADIUS + 1 ];
varying vec2 vUv;
varying vec2 vInvSize;
float getDepth( const in vec2 screenPosition ) {
#if DEPTH_PACKING == 1
return unpackRGBAToDepth( texture2D( tDepth, screenPosition ) );
#else
return texture2D( tDepth, screenPosition ).x;
#endif
}
float getViewZ( const in float depth ) {
#if PERSPECTIVE_CAMERA == 1
return perspectiveDepthToViewZ( depth, cameraNear, cameraFar );
#else
return orthographicDepthToViewZ( depth, cameraNear, cameraFar );
#endif
}
void main() {
float depth = getDepth( vUv );
if( depth >= ( 1.0 - EPSILON ) ) {
discard;
}
float centerViewZ = -getViewZ( depth );
bool rBreak = false, lBreak = false;
float weightSum = sampleWeights[0];
vec4 diffuseSum = texture2D( tDiffuse, vUv ) * weightSum;
for( int i = 1; i <= KERNEL_RADIUS; i ++ ) {
float sampleWeight = sampleWeights[i];
vec2 sampleUvOffset = sampleUvOffsets[i] * vInvSize;
vec2 sampleUv = vUv + sampleUvOffset;
float viewZ = -getViewZ( getDepth( sampleUv ) );
if( abs( viewZ - centerViewZ ) > depthCutoff ) rBreak = true;
if( ! rBreak ) {
diffuseSum += texture2D( tDiffuse, sampleUv ) * sampleWeight;
weightSum += sampleWeight;
}
sampleUv = vUv - sampleUvOffset;
viewZ = -getViewZ( getDepth( sampleUv ) );
if( abs( viewZ - centerViewZ ) > depthCutoff ) lBreak = true;
if( ! lBreak ) {
diffuseSum += texture2D( tDiffuse, sampleUv ) * sampleWeight;
weightSum += sampleWeight;
}
}
gl_FragColor = diffuseSum / weightSum;
}`
};
// Helpers for configuring DepthLimitedBlurShader-style separable blurs:
// build Gaussian weights and matching UV offsets for kernelRadius + 1 taps.
const BlurShaderUtils = {
// Normalized-Gaussian weight per tap i = 0..kernelRadius.
createSampleWeights( kernelRadius, stdDev ) {
return Array.from( { length: kernelRadius + 1 }, ( _, tap ) => gaussian( tap, stdDev ) );
},
// UV offset per tap: tap index scaled along uvIncrement (cloned, not mutated).
createSampleOffsets( kernelRadius, uvIncrement ) {
return Array.from( { length: kernelRadius + 1 }, ( _, tap ) => uvIncrement.clone().multiplyScalar( tap ) );
},
// Writes kernel radius, offsets and weights into an existing material.
configure( material, kernelRadius, stdDev, uvIncrement ) {
material.defines[ 'KERNEL_RADIUS' ] = kernelRadius;
material.uniforms[ 'sampleUvOffsets' ].value = BlurShaderUtils.createSampleOffsets( kernelRadius, uvIncrement );
material.uniforms[ 'sampleWeights' ].value = BlurShaderUtils.createSampleWeights( kernelRadius, stdDev );
material.needsUpdate = true;
}
};
// 1-D Gaussian PDF with standard deviation stdDev, evaluated at x.
function gaussian( x, stdDev ) {
const variance = stdDev * stdDev;
return Math.exp( - ( x * x ) / ( 2.0 * variance ) ) / ( Math.sqrt( 2.0 * Math.PI ) * stdDev );
}
export { DepthLimitedBlurShader, BlurShaderUtils };

View File

@ -0,0 +1,101 @@
/**
 * Digital glitch shader.
 *
 * Produces block-displacement "glitch" artifacts driven by a displacement
 * texture (tDisp), plus band distortion, an RGB channel shift, and additive
 * noise. The RGB-shift portion is based on
 * http://kriss.cx/tom/2009/05/rgb-shift/ by Tom Butterworth / http://kriss.cx/tom/
 *
 * amount: shift distance (1 is width of input)
 * angle: shift angle in radians
 */
const DigitalGlitch = {
uniforms: {
'tDiffuse': { value: null }, //diffuse texture
'tDisp': { value: null }, //displacement texture for digital glitch squares
'byp': { value: 0 }, //apply the glitch ? (>= 1 bypasses the effect)
'amount': { value: 0.08 }, // RGB-shift distance
'angle': { value: 0.02 }, // RGB-shift angle in radians
'seed': { value: 0.02 }, // randomization seed for the current frame
'seed_x': { value: 0.02 }, //-1,1
'seed_y': { value: 0.02 }, //-1,1
'distortion_x': { value: 0.5 }, // horizontal band position/offset
'distortion_y': { value: 0.6 }, // vertical band position/offset
'col_s': { value: 0.05 } // distortion band half-width
},
// Pass-through vertex shader for the full-screen quad.
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
uniform int byp; //should we apply the glitch ?
uniform sampler2D tDiffuse;
uniform sampler2D tDisp;
uniform float amount;
uniform float angle;
uniform float seed;
uniform float seed_x;
uniform float seed_y;
uniform float distortion_x;
uniform float distortion_y;
uniform float col_s;
varying vec2 vUv;
float rand(vec2 co){
return fract(sin(dot(co.xy ,vec2(12.9898,78.233))) * 43758.5453);
}
void main() {
if(byp<1) {
vec2 p = vUv;
float xs = floor(gl_FragCoord.x / 0.5);
float ys = floor(gl_FragCoord.y / 0.5);
//based on staffantans glitch shader for unity https://github.com/staffantan/unityglitch
float disp = texture2D(tDisp, p*seed*seed).r;
if(p.y<distortion_x+col_s && p.y>distortion_x-col_s*seed) {
if(seed_x>0.){
p.y = 1. - (p.y + distortion_y);
}
else {
p.y = distortion_y;
}
}
if(p.x<distortion_y+col_s && p.x>distortion_y-col_s*seed) {
if(seed_y>0.){
p.x=distortion_x;
}
else {
p.x = 1. - (p.x + distortion_x);
}
}
p.x+=disp*seed_x*(seed/5.);
p.y+=disp*seed_y*(seed/5.);
//base from RGB shift shader
vec2 offset = amount * vec2( cos(angle), sin(angle));
vec4 cr = texture2D(tDiffuse, p + offset);
vec4 cga = texture2D(tDiffuse, p);
vec4 cb = texture2D(tDiffuse, p - offset);
gl_FragColor = vec4(cr.r, cga.g, cb.b, cga.a);
//add noise
vec4 snow = 200.*amount*vec4(rand(vec2(xs * seed,ys * seed*50.))*0.2);
gl_FragColor = gl_FragColor+ snow;
}
else {
gl_FragColor=texture2D (tDiffuse, vUv);
}
}`
};
export { DigitalGlitch };

View File

@ -0,0 +1,70 @@
import {
Vector2
} from 'three';
/**
 * Dot screen (halftone-style) shader
 * based on glfx.js sepia shader
 * https://github.com/evanw/glfx.js
 */
const DOT_SCREEN_VERTEX = /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`;
const DOT_SCREEN_FRAGMENT = /* glsl */`
uniform vec2 center;
uniform float angle;
uniform float scale;
uniform vec2 tSize;
uniform sampler2D tDiffuse;
varying vec2 vUv;
float pattern() {
float s = sin( angle ), c = cos( angle );
vec2 tex = vUv * tSize - center;
vec2 point = vec2( c * tex.x - s * tex.y, s * tex.x + c * tex.y ) * scale;
return ( sin( point.x ) * sin( point.y ) ) * 4.0;
}
void main() {
vec4 color = texture2D( tDiffuse, vUv );
float average = ( color.r + color.g + color.b ) / 3.0;
gl_FragColor = vec4( vec3( average * 10.0 - 5.0 + pattern() ), color.a );
}`;
const DotScreenShader = {
name: 'DotScreenShader',
uniforms: {
tDiffuse: { value: null }, // input render
tSize: { value: new Vector2( 256, 256 ) }, // pattern tiling size
center: { value: new Vector2( 0.5, 0.5 ) }, // pattern origin
angle: { value: 1.57 }, // dot grid rotation (radians)
scale: { value: 1.0 } // dot grid frequency
},
vertexShader: DOT_SCREEN_VERTEX,
fragmentShader: DOT_SCREEN_FRAGMENT
};
export { DotScreenShader };

View File

@ -0,0 +1,44 @@
/**
 * Exposure shader: multiplies the RGB channels of the input by `exposure`,
 * leaving alpha untouched.
 */
const EXPOSURE_VERTEX = /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`;
const EXPOSURE_FRAGMENT = /* glsl */`
uniform float exposure;
uniform sampler2D tDiffuse;
varying vec2 vUv;
void main() {
gl_FragColor = texture2D( tDiffuse, vUv );
gl_FragColor.rgb *= exposure;
}`;
const ExposureShader = {
name: 'ExposureShader',
uniforms: {
tDiffuse: { value: null }, // input render
exposure: { value: 1.0 } // linear RGB multiplier
},
vertexShader: EXPOSURE_VERTEX,
fragmentShader: EXPOSURE_FRAGMENT
};
export { ExposureShader };

View File

@ -0,0 +1,288 @@
import {
Vector2
} from 'three';
/**
 * NVIDIA FXAA by Timothy Lottes
 * https://developer.download.nvidia.com/assets/gamedev/files/sdk/11/FXAA_WhitePaper.pdf
 * - WebGL port by @supereggbert
 * http://www.glge.org/demos/fxaa/
 * Further improved by Daniel Sturk
 *
 * Screen-space fast approximate anti-aliasing. This variant measures contrast
 * on all four RGBA channels (see contrast() below; it assumes premultiplied
 * alpha), walks along the detected edge in both directions for up to
 * NUM_SAMPLES steps, and blends the center pixel toward its edge neighbour
 * proportionally to how close the pixel is to the edge's end.
 */
const FXAAShader = {
name: 'FXAAShader',
// tDiffuse:   input render target.
// resolution: reciprocal of the render target size, i.e. (1/width, 1/height).
uniforms: {
'tDiffuse': { value: null },
'resolution': { value: new Vector2( 1 / 1024, 1 / 512 ) }
},
// Pass-through vertex shader: forwards mesh UVs to the fragment stage.
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
// Fragment shader: FxaaPixelShader() performs early-exit on low-contrast
// pixels, disambiguates horizontal vs. vertical (and 45-degree) edges, then
// does the end-of-edge search and the final distance-weighted blend.
fragmentShader: /* glsl */`
precision highp float;
uniform sampler2D tDiffuse;
uniform vec2 resolution;
varying vec2 vUv;
// FXAA 3.11 implementation by NVIDIA, ported to WebGL by Agost Biro (biro@archilogic.com)
//----------------------------------------------------------------------------------
// File: es3-kepler\FXAA\assets\shaders/FXAA_DefaultES.frag
// SDK Version: v3.00
// Email: gameworks@nvidia.com
// Site: http://developer.nvidia.com/
//
// Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//----------------------------------------------------------------------------------
#ifndef FXAA_DISCARD
//
// Only valid for PC OpenGL currently.
// Probably will not work when FXAA_GREEN_AS_LUMA = 1.
//
// 1 = Use discard on pixels which don't need AA.
// For APIs which enable concurrent TEX+ROP from same surface.
// 0 = Return unchanged color on pixels which don't need AA.
//
#define FXAA_DISCARD 0
#endif
/*--------------------------------------------------------------------------*/
#define FxaaTexTop(t, p) texture2D(t, p, -100.0)
#define FxaaTexOff(t, p, o, r) texture2D(t, p + (o * r), -100.0)
/*--------------------------------------------------------------------------*/
#define NUM_SAMPLES 5
// assumes colors have premultipliedAlpha, so that the calculated color contrast is scaled by alpha
float contrast( vec4 a, vec4 b ) {
vec4 diff = abs( a - b );
return max( max( max( diff.r, diff.g ), diff.b ), diff.a );
}
/*============================================================================
FXAA3 QUALITY - PC
============================================================================*/
/*--------------------------------------------------------------------------*/
vec4 FxaaPixelShader(
vec2 posM,
sampler2D tex,
vec2 fxaaQualityRcpFrame,
float fxaaQualityEdgeThreshold,
float fxaaQualityinvEdgeThreshold
) {
vec4 rgbaM = FxaaTexTop(tex, posM);
vec4 rgbaS = FxaaTexOff(tex, posM, vec2( 0.0, 1.0), fxaaQualityRcpFrame.xy);
vec4 rgbaE = FxaaTexOff(tex, posM, vec2( 1.0, 0.0), fxaaQualityRcpFrame.xy);
vec4 rgbaN = FxaaTexOff(tex, posM, vec2( 0.0,-1.0), fxaaQualityRcpFrame.xy);
vec4 rgbaW = FxaaTexOff(tex, posM, vec2(-1.0, 0.0), fxaaQualityRcpFrame.xy);
// . S .
// W M E
// . N .
bool earlyExit = max( max( max(
contrast( rgbaM, rgbaN ),
contrast( rgbaM, rgbaS ) ),
contrast( rgbaM, rgbaE ) ),
contrast( rgbaM, rgbaW ) )
< fxaaQualityEdgeThreshold;
// . 0 .
// 0 0 0
// . 0 .
#if (FXAA_DISCARD == 1)
if(earlyExit) FxaaDiscard;
#else
if(earlyExit) return rgbaM;
#endif
float contrastN = contrast( rgbaM, rgbaN );
float contrastS = contrast( rgbaM, rgbaS );
float contrastE = contrast( rgbaM, rgbaE );
float contrastW = contrast( rgbaM, rgbaW );
float relativeVContrast = ( contrastN + contrastS ) - ( contrastE + contrastW );
relativeVContrast *= fxaaQualityinvEdgeThreshold;
bool horzSpan = relativeVContrast > 0.;
// . 1 .
// 0 0 0
// . 1 .
// 45 deg edge detection and corners of objects, aka V/H contrast is too similar
if( abs( relativeVContrast ) < .3 ) {
// locate the edge
vec2 dirToEdge;
dirToEdge.x = contrastE > contrastW ? 1. : -1.;
dirToEdge.y = contrastS > contrastN ? 1. : -1.;
// . 2 . . 1 .
// 1 0 2 ~= 0 0 1
// . 1 . . 0 .
// tap 2 pixels and see which ones are "outside" the edge, to
// determine if the edge is vertical or horizontal
vec4 rgbaAlongH = FxaaTexOff(tex, posM, vec2( dirToEdge.x, -dirToEdge.y ), fxaaQualityRcpFrame.xy);
float matchAlongH = contrast( rgbaM, rgbaAlongH );
// . 1 .
// 0 0 1
// . 0 H
vec4 rgbaAlongV = FxaaTexOff(tex, posM, vec2( -dirToEdge.x, dirToEdge.y ), fxaaQualityRcpFrame.xy);
float matchAlongV = contrast( rgbaM, rgbaAlongV );
// V 1 .
// 0 0 1
// . 0 .
relativeVContrast = matchAlongV - matchAlongH;
relativeVContrast *= fxaaQualityinvEdgeThreshold;
if( abs( relativeVContrast ) < .3 ) { // 45 deg edge
// 1 1 .
// 0 0 1
// . 0 1
// do a simple blur
return mix(
rgbaM,
(rgbaN + rgbaS + rgbaE + rgbaW) * .25,
.4
);
}
horzSpan = relativeVContrast > 0.;
}
if(!horzSpan) rgbaN = rgbaW;
if(!horzSpan) rgbaS = rgbaE;
// . 0 . 1
// 1 0 1 -> 0
// . 0 . 1
bool pairN = contrast( rgbaM, rgbaN ) > contrast( rgbaM, rgbaS );
if(!pairN) rgbaN = rgbaS;
vec2 offNP;
offNP.x = (!horzSpan) ? 0.0 : fxaaQualityRcpFrame.x;
offNP.y = ( horzSpan) ? 0.0 : fxaaQualityRcpFrame.y;
bool doneN = false;
bool doneP = false;
float nDist = 0.;
float pDist = 0.;
vec2 posN = posM;
vec2 posP = posM;
int iterationsUsed = 0;
int iterationsUsedN = 0;
int iterationsUsedP = 0;
for( int i = 0; i < NUM_SAMPLES; i++ ) {
iterationsUsed = i;
float increment = float(i + 1);
if(!doneN) {
nDist += increment;
posN = posM + offNP * nDist;
vec4 rgbaEndN = FxaaTexTop(tex, posN.xy);
doneN = contrast( rgbaEndN, rgbaM ) > contrast( rgbaEndN, rgbaN );
iterationsUsedN = i;
}
if(!doneP) {
pDist += increment;
posP = posM - offNP * pDist;
vec4 rgbaEndP = FxaaTexTop(tex, posP.xy);
doneP = contrast( rgbaEndP, rgbaM ) > contrast( rgbaEndP, rgbaN );
iterationsUsedP = i;
}
if(doneN || doneP) break;
}
if ( !doneP && !doneN ) return rgbaM; // failed to find end of edge
float dist = min(
doneN ? float( iterationsUsedN ) / float( NUM_SAMPLES - 1 ) : 1.,
doneP ? float( iterationsUsedP ) / float( NUM_SAMPLES - 1 ) : 1.
);
// hacky way of reduces blurriness of mostly diagonal edges
// but reduces AA quality
dist = pow(dist, .5);
dist = 1. - dist;
return mix(
rgbaM,
rgbaN,
dist * .5
);
}
void main() {
const float edgeDetectionQuality = .2;
const float invEdgeDetectionQuality = 1. / edgeDetectionQuality;
gl_FragColor = FxaaPixelShader(
vUv,
tDiffuse,
resolution,
edgeDetectionQuality, // [0,1] contrast needed, otherwise early discard
invEdgeDetectionQuality
);
}
`
};
export { FXAAShader };

View File

@ -0,0 +1,59 @@
/**
 * Film grain shader
 *
 * Full-screen pass adding time-varying pseudo-random noise (film grain) on
 * top of the input, with an optional grayscale conversion. Relies on rand()
 * and luminance() from the three.js <common> shader chunk.
 */
const FilmShader = {
name: 'FilmShader',
// tDiffuse:  input render target.
// time:      animates the noise pattern between frames.
// intensity: blend factor between the clean and the grain-boosted color.
// grayscale: when true, output luminance only.
uniforms: {
'tDiffuse': { value: null },
'time': { value: 0.0 },
'intensity': { value: 0.5 },
'grayscale': { value: false }
},
// Pass-through vertex shader: forwards mesh UVs to the fragment stage.
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
// Fragment shader: per-pixel noise seeded from (uv + time) brightens the
// base color, then mix() scales the effect by 'intensity'.
fragmentShader: /* glsl */`
#include <common>
uniform float intensity;
uniform bool grayscale;
uniform float time;
uniform sampler2D tDiffuse;
varying vec2 vUv;
void main() {
vec4 base = texture2D( tDiffuse, vUv );
float noise = rand( fract( vUv + time ) );
vec3 color = base.rgb + base.rgb * clamp( 0.1 + noise, 0.0, 1.0 );
color = mix( base.rgb, color, intensity );
if ( grayscale ) {
color = vec3( luminance( color ) ); // assuming linear-srgb
}
gl_FragColor = vec4( color, base.a );
}`,
};
export { FilmShader };

View File

@ -0,0 +1,89 @@
/**
 * Focus shader
 * based on PaintEffect postprocess from ro.me
 * http://code.google.com/p/3-dreams-of-black/source/browse/deploy/js/effects/PaintEffect.js
 *
 * Painterly vignette/defocus effect: samples the input at 7 fixed offsets on
 * a ring whose radius grows with distance from the screen center, keeps the
 * darkest-blue sample as the base color, and sharpens it against the ring
 * average. The effect strengthens toward the screen edges.
 */
const FocusShader = {
name: 'FocusShader',
// tDiffuse:       input render target.
// screenWidth/
// screenHeight:   render target size in pixels (used to size the sample ring).
// sampleDistance: scales the ring radius.
// waveFactor:     adds a constant base radius independent of screen position.
uniforms: {
'tDiffuse': { value: null },
'screenWidth': { value: 1024 },
'screenHeight': { value: 1024 },
'sampleDistance': { value: 0.94 },
'waveFactor': { value: 0.00125 }
},
// Pass-through vertex shader: forwards mesh UVs to the fragment stage.
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
// Fragment shader: sample_dist is the squared distance from screen center;
// each tap accumulates into 'add' while 'color' tracks the tap with the
// smallest blue channel; the final lines do an unsharp-mask style combine.
fragmentShader: /* glsl */`
uniform float screenWidth;
uniform float screenHeight;
uniform float sampleDistance;
uniform float waveFactor;
uniform sampler2D tDiffuse;
varying vec2 vUv;
void main() {
vec4 color, org, tmp, add;
float sample_dist, f;
vec2 vin;
vec2 uv = vUv;
add = color = org = texture2D( tDiffuse, uv );
vin = ( uv - vec2( 0.5 ) ) * vec2( 1.4 );
sample_dist = dot( vin, vin ) * 2.0;
f = ( waveFactor * 100.0 + sample_dist ) * sampleDistance * 4.0;
vec2 sampleSize = vec2( 1.0 / screenWidth, 1.0 / screenHeight ) * vec2( f );
add += tmp = texture2D( tDiffuse, uv + vec2( 0.111964, 0.993712 ) * sampleSize );
if( tmp.b < color.b ) color = tmp;
add += tmp = texture2D( tDiffuse, uv + vec2( 0.846724, 0.532032 ) * sampleSize );
if( tmp.b < color.b ) color = tmp;
add += tmp = texture2D( tDiffuse, uv + vec2( 0.943883, -0.330279 ) * sampleSize );
if( tmp.b < color.b ) color = tmp;
add += tmp = texture2D( tDiffuse, uv + vec2( 0.330279, -0.943883 ) * sampleSize );
if( tmp.b < color.b ) color = tmp;
add += tmp = texture2D( tDiffuse, uv + vec2( -0.532032, -0.846724 ) * sampleSize );
if( tmp.b < color.b ) color = tmp;
add += tmp = texture2D( tDiffuse, uv + vec2( -0.993712, -0.111964 ) * sampleSize );
if( tmp.b < color.b ) color = tmp;
add += tmp = texture2D( tDiffuse, uv + vec2( -0.707107, 0.707107 ) * sampleSize );
if( tmp.b < color.b ) color = tmp;
color = color * vec4( 2.0 ) - ( add / vec4( 8.0 ) );
color = color + ( add / vec4( 8.0 ) - color ) * ( vec4( 1.0 ) - vec4( sample_dist * 0.5 ) );
gl_FragColor = vec4( color.rgb * color.rgb * vec3( 0.95 ) + color.rgb, 1.0 );
}`
};
export { FocusShader };

View File

@ -0,0 +1,96 @@
import {
Vector2
} from 'three';
/**
 * Edge Detection Shader using Frei-Chen filter
 * Based on http://rastergrid.com/blog/2011/01/frei-chen-edge-detector
 *
 * aspect: vec2 of (1/width, 1/height)
 *
 * Convolves the 3x3 luminance neighbourhood of each pixel with the nine
 * Frei-Chen basis masks (g0..g8); the ratio M/S of the "edge subspace"
 * energy to the total energy gives the edge strength written to the output.
 */
const FreiChenShader = {
name: 'FreiChenShader',
// tDiffuse: input render target.
// aspect:   render target size; the shader converts it into a texel size.
uniforms: {
'tDiffuse': { value: null },
'aspect': { value: new Vector2( 512, 512 ) }
},
// Pass-through vertex shader: forwards mesh UVs to the fragment stage.
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
// Fragment shader: I holds the 3x3 intensity (RGB length) neighbourhood,
// cnv[i] the squared response of mask G[i]; M sums the first four (edge)
// responses and S all nine, so sqrt(M/S) is in [0,1].
fragmentShader: /* glsl */`
uniform sampler2D tDiffuse;
varying vec2 vUv;
uniform vec2 aspect;
vec2 texel = vec2( 1.0 / aspect.x, 1.0 / aspect.y );
mat3 G[9];
// hard coded matrix values!!!! as suggested in https://github.com/neilmendoza/ofxPostProcessing/blob/master/src/EdgePass.cpp#L45
const mat3 g0 = mat3( 0.3535533845424652, 0, -0.3535533845424652, 0.5, 0, -0.5, 0.3535533845424652, 0, -0.3535533845424652 );
const mat3 g1 = mat3( 0.3535533845424652, 0.5, 0.3535533845424652, 0, 0, 0, -0.3535533845424652, -0.5, -0.3535533845424652 );
const mat3 g2 = mat3( 0, 0.3535533845424652, -0.5, -0.3535533845424652, 0, 0.3535533845424652, 0.5, -0.3535533845424652, 0 );
const mat3 g3 = mat3( 0.5, -0.3535533845424652, 0, -0.3535533845424652, 0, 0.3535533845424652, 0, 0.3535533845424652, -0.5 );
const mat3 g4 = mat3( 0, -0.5, 0, 0.5, 0, 0.5, 0, -0.5, 0 );
const mat3 g5 = mat3( -0.5, 0, 0.5, 0, 0, 0, 0.5, 0, -0.5 );
const mat3 g6 = mat3( 0.1666666716337204, -0.3333333432674408, 0.1666666716337204, -0.3333333432674408, 0.6666666865348816, -0.3333333432674408, 0.1666666716337204, -0.3333333432674408, 0.1666666716337204 );
const mat3 g7 = mat3( -0.3333333432674408, 0.1666666716337204, -0.3333333432674408, 0.1666666716337204, 0.6666666865348816, 0.1666666716337204, -0.3333333432674408, 0.1666666716337204, -0.3333333432674408 );
const mat3 g8 = mat3( 0.3333333432674408, 0.3333333432674408, 0.3333333432674408, 0.3333333432674408, 0.3333333432674408, 0.3333333432674408, 0.3333333432674408, 0.3333333432674408, 0.3333333432674408 );
void main(void)
{
G[0] = g0,
G[1] = g1,
G[2] = g2,
G[3] = g3,
G[4] = g4,
G[5] = g5,
G[6] = g6,
G[7] = g7,
G[8] = g8;
mat3 I;
float cnv[9];
vec3 sample;
/* fetch the 3x3 neighbourhood and use the RGB vector's length as intensity value */
for (float i=0.0; i<3.0; i++) {
for (float j=0.0; j<3.0; j++) {
sample = texture2D(tDiffuse, vUv + texel * vec2(i-1.0,j-1.0) ).rgb;
I[int(i)][int(j)] = length(sample);
}
}
/* calculate the convolution values for all the masks */
for (int i=0; i<9; i++) {
float dp3 = dot(G[i][0], I[0]) + dot(G[i][1], I[1]) + dot(G[i][2], I[2]);
cnv[i] = dp3 * dp3;
}
float M = (cnv[0] + cnv[1]) + (cnv[2] + cnv[3]);
float S = (cnv[4] + cnv[5]) + (cnv[6] + cnv[7]) + (cnv[8] + M);
gl_FragColor = vec4(vec3(sqrt(M/S)), 1.0);
}`
};
export { FreiChenShader };

View File

@ -0,0 +1,424 @@
import {
DataTexture,
Matrix4,
RepeatWrapping,
Vector2,
Vector3,
} from 'three';
/**
 * References:
 * - implemented algorithm - GTAO
 * - https://iryoku.com/downloads/Practical-Realtime-Strategies-for-Accurate-Indirect-Occlusion.pdf
 * - https://github.com/Patapom/GodComplex/blob/master/Tests/TestHBIL/2018%20Mayaux%20-%20Horizon-Based%20Indirect%20Lighting%20(HBIL).pdf
 *
 * - other AO algorithms that are not implemented here:
 * - Screen Space Ambient Occlusion (SSAO), see also SSAOShader.js
 * - http://john-chapman-graphics.blogspot.com/2013/01/ssao-tutorial.html
 * - https://learnopengl.com/Advanced-Lighting/SSAO
 * - https://creativecoding.soe.ucsc.edu/courses/cmpm164/_schedule/AmbientOcclusion.pdf
 * - https://drive.google.com/file/d/1SyagcEVplIm2KkRD3WQYSO9O0Iyi1hfy/edit
 * - Scalable Ambient Occlusion (SAO), see also SAOShader.js
 * - https://casual-effects.com/research/McGuire2012SAO/index.html
 * - https://research.nvidia.com/sites/default/files/pubs/2012-06_Scalable-Ambient-Obscurance/McGuire12SAO.pdf
 * - N8HO
 * - https://github.com/N8python/n8ao
 * - Horizon Based Ambient Occlusion (HBAO)
 * - http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.577.2286&rep=rep1&type=pdf
 * - https://www.derschmale.com/2013/12/20/an-alternative-implementation-for-hbao-2/
 *
 * - further reading
 * - https://ceur-ws.org/Vol-3027/paper5.pdf
 * - https://www.comp.nus.edu.sg/~lowkl/publications/mssao_visual_computer_2012.pdf
 * - https://web.ics.purdue.edu/~tmcgraw/papers/mcgraw-ao-2008.pdf
 * - https://www.activision.com/cdn/research/Practical_Real_Time_Strategies_for_Accurate_Indirect_Occlusion_NEW%20VERSION_COLOR.pdf
 * - https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.390.2463&rep=rep1&type=pdf
 * - https://www.intel.com/content/www/us/en/developer/articles/technical/adaptive-screen-space-ambient-occlusion.html
 */
// Ground-truth ambient occlusion pass: for each pixel it reconstructs the
// view-space position from depth, then integrates occlusion horizons along
// several noise-rotated screen-space slice directions.
const GTAOShader = {
name: 'GTAOShader',
// Compile-time switches:
// PERSPECTIVE_CAMERA:  1 = perspective depth linearization, 0 = orthographic.
// SAMPLES:             total horizon samples (split into DIRECTIONS * STEPS).
// NORMAL_VECTOR_TYPE:  2 = raw normal texture, 1 = RGB-packed, else derive from depth.
// DEPTH_SWIZZLING:     channel of tDepth holding depth.
// SCREEN_SPACE_RADIUS: 1 = interpret radius in pixels instead of view units.
// SCENE_CLIP_BOX:      1 = limit the effect to a world-space AABB.
defines: {
PERSPECTIVE_CAMERA: 1,
SAMPLES: 16,
NORMAL_VECTOR_TYPE: 1,
DEPTH_SWIZZLING: 'x',
SCREEN_SPACE_RADIUS: 0,
SCREEN_SPACE_RADIUS_SCALE: 100.0,
SCENE_CLIP_BOX: 0,
},
// tNoise holds per-pixel rotation vectors (see generateMagicSquareNoise);
// radius/thickness/distanceExponent/distanceFallOff/scale tune the AO kernel;
// sceneBoxMin/Max are only read when SCENE_CLIP_BOX == 1.
uniforms: {
tNormal: { value: null },
tDepth: { value: null },
tNoise: { value: null },
resolution: { value: new Vector2() },
cameraNear: { value: null },
cameraFar: { value: null },
cameraProjectionMatrix: { value: new Matrix4() },
cameraProjectionMatrixInverse: { value: new Matrix4() },
cameraWorldMatrix: { value: new Matrix4() },
radius: { value: 0.25 },
distanceExponent: { value: 1. },
thickness: { value: 1. },
distanceFallOff: { value: 1. },
scale: { value: 1. },
sceneBoxMin: { value: new Vector3( - 1, - 1, - 1 ) },
sceneBoxMax: { value: new Vector3( 1, 1, 1 ) },
},
// Pass-through vertex shader: forwards mesh UVs to the fragment stage.
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
// Fragment shader: background pixels (depth >= 1) are discarded; otherwise
// the horizon-based integral below accumulates 'ao' per slice direction and
// the result is contrast-adjusted with pow(ao, scale).
fragmentShader: /* glsl */`
varying vec2 vUv;
uniform highp sampler2D tNormal;
uniform highp sampler2D tDepth;
uniform sampler2D tNoise;
uniform vec2 resolution;
uniform float cameraNear;
uniform float cameraFar;
uniform mat4 cameraProjectionMatrix;
uniform mat4 cameraProjectionMatrixInverse;
uniform mat4 cameraWorldMatrix;
uniform float radius;
uniform float distanceExponent;
uniform float thickness;
uniform float distanceFallOff;
uniform float scale;
#if SCENE_CLIP_BOX == 1
uniform vec3 sceneBoxMin;
uniform vec3 sceneBoxMax;
#endif
#include <common>
#include <packing>
#ifndef FRAGMENT_OUTPUT
#define FRAGMENT_OUTPUT vec4(vec3(ao), 1.)
#endif
vec3 getViewPosition(const in vec2 screenPosition, const in float depth) {
vec4 clipSpacePosition = vec4(vec3(screenPosition, depth) * 2.0 - 1.0, 1.0);
vec4 viewSpacePosition = cameraProjectionMatrixInverse * clipSpacePosition;
return viewSpacePosition.xyz / viewSpacePosition.w;
}
float getDepth(const vec2 uv) {
return textureLod(tDepth, uv.xy, 0.0).DEPTH_SWIZZLING;
}
float fetchDepth(const ivec2 uv) {
return texelFetch(tDepth, uv.xy, 0).DEPTH_SWIZZLING;
}
float getViewZ(const in float depth) {
#if PERSPECTIVE_CAMERA == 1
return perspectiveDepthToViewZ(depth, cameraNear, cameraFar);
#else
return orthographicDepthToViewZ(depth, cameraNear, cameraFar);
#endif
}
vec3 computeNormalFromDepth(const vec2 uv) {
vec2 size = vec2(textureSize(tDepth, 0));
ivec2 p = ivec2(uv * size);
float c0 = fetchDepth(p);
float l2 = fetchDepth(p - ivec2(2, 0));
float l1 = fetchDepth(p - ivec2(1, 0));
float r1 = fetchDepth(p + ivec2(1, 0));
float r2 = fetchDepth(p + ivec2(2, 0));
float b2 = fetchDepth(p - ivec2(0, 2));
float b1 = fetchDepth(p - ivec2(0, 1));
float t1 = fetchDepth(p + ivec2(0, 1));
float t2 = fetchDepth(p + ivec2(0, 2));
float dl = abs((2.0 * l1 - l2) - c0);
float dr = abs((2.0 * r1 - r2) - c0);
float db = abs((2.0 * b1 - b2) - c0);
float dt = abs((2.0 * t1 - t2) - c0);
vec3 ce = getViewPosition(uv, c0).xyz;
vec3 dpdx = (dl < dr) ? ce - getViewPosition((uv - vec2(1.0 / size.x, 0.0)), l1).xyz : -ce + getViewPosition((uv + vec2(1.0 / size.x, 0.0)), r1).xyz;
vec3 dpdy = (db < dt) ? ce - getViewPosition((uv - vec2(0.0, 1.0 / size.y)), b1).xyz : -ce + getViewPosition((uv + vec2(0.0, 1.0 / size.y)), t1).xyz;
return normalize(cross(dpdx, dpdy));
}
vec3 getViewNormal(const vec2 uv) {
#if NORMAL_VECTOR_TYPE == 2
return normalize(textureLod(tNormal, uv, 0.).rgb);
#elif NORMAL_VECTOR_TYPE == 1
return unpackRGBToNormal(textureLod(tNormal, uv, 0.).rgb);
#else
return computeNormalFromDepth(uv);
#endif
}
vec3 getSceneUvAndDepth(vec3 sampleViewPos) {
vec4 sampleClipPos = cameraProjectionMatrix * vec4(sampleViewPos, 1.);
vec2 sampleUv = sampleClipPos.xy / sampleClipPos.w * 0.5 + 0.5;
float sampleSceneDepth = getDepth(sampleUv);
return vec3(sampleUv, sampleSceneDepth);
}
void main() {
float depth = getDepth(vUv.xy);
if (depth >= 1.0) {
discard;
return;
}
vec3 viewPos = getViewPosition(vUv, depth);
vec3 viewNormal = getViewNormal(vUv);
float radiusToUse = radius;
float distanceFalloffToUse = thickness;
#if SCREEN_SPACE_RADIUS == 1
float radiusScale = getViewPosition(vec2(0.5 + float(SCREEN_SPACE_RADIUS_SCALE) / resolution.x, 0.0), depth).x;
radiusToUse *= radiusScale;
distanceFalloffToUse *= radiusScale;
#endif
#if SCENE_CLIP_BOX == 1
vec3 worldPos = (cameraWorldMatrix * vec4(viewPos, 1.0)).xyz;
float boxDistance = length(max(vec3(0.0), max(sceneBoxMin - worldPos, worldPos - sceneBoxMax)));
if (boxDistance > radiusToUse) {
discard;
return;
}
#endif
vec2 noiseResolution = vec2(textureSize(tNoise, 0));
vec2 noiseUv = vUv * resolution / noiseResolution;
vec4 noiseTexel = textureLod(tNoise, noiseUv, 0.0);
vec3 randomVec = noiseTexel.xyz * 2.0 - 1.0;
vec3 tangent = normalize(vec3(randomVec.xy, 0.));
vec3 bitangent = vec3(-tangent.y, tangent.x, 0.);
mat3 kernelMatrix = mat3(tangent, bitangent, vec3(0., 0., 1.));
const int DIRECTIONS = SAMPLES < 30 ? 3 : 5;
const int STEPS = (SAMPLES + DIRECTIONS - 1) / DIRECTIONS;
float ao = 0.0, totalWeight = 0.0;
for (int i = 0; i < DIRECTIONS; ++i) {
float angle = float(i) / float(DIRECTIONS) * PI;
vec4 sampleDir = vec4(cos(angle), sin(angle), 0., 0.5 + 0.5 * noiseTexel.w);
sampleDir.xyz = normalize(kernelMatrix * sampleDir.xyz);
vec3 viewDir = normalize(-viewPos.xyz);
vec3 sliceBitangent = normalize(cross(sampleDir.xyz, viewDir));
vec3 sliceTangent = cross(sliceBitangent, viewDir);
vec3 normalInSlice = normalize(viewNormal - sliceBitangent * dot(viewNormal, sliceBitangent));
vec3 tangentToNormalInSlice = cross(normalInSlice, sliceBitangent);
vec2 cosHorizons = vec2(dot(viewDir, tangentToNormalInSlice), dot(viewDir, -tangentToNormalInSlice));
for (int j = 0; j < STEPS; ++j) {
vec3 sampleViewOffset = sampleDir.xyz * radiusToUse * sampleDir.w * pow(float(j + 1) / float(STEPS), distanceExponent);
vec3 sampleSceneUvDepth = getSceneUvAndDepth(viewPos + sampleViewOffset);
vec3 sampleSceneViewPos = getViewPosition(sampleSceneUvDepth.xy, sampleSceneUvDepth.z);
vec3 viewDelta = sampleSceneViewPos - viewPos;
if (abs(viewDelta.z) < thickness) {
float sampleCosHorizon = dot(viewDir, normalize(viewDelta));
cosHorizons.x += max(0., (sampleCosHorizon - cosHorizons.x) * mix(1., 2. / float(j + 2), distanceFallOff));
}
sampleSceneUvDepth = getSceneUvAndDepth(viewPos - sampleViewOffset);
sampleSceneViewPos = getViewPosition(sampleSceneUvDepth.xy, sampleSceneUvDepth.z);
viewDelta = sampleSceneViewPos - viewPos;
if (abs(viewDelta.z) < thickness) {
float sampleCosHorizon = dot(viewDir, normalize(viewDelta));
cosHorizons.y += max(0., (sampleCosHorizon - cosHorizons.y) * mix(1., 2. / float(j + 2), distanceFallOff));
}
}
vec2 sinHorizons = sqrt(1. - cosHorizons * cosHorizons);
float nx = dot(normalInSlice, sliceTangent);
float ny = dot(normalInSlice, viewDir);
float nxb = 1. / 2. * (acos(cosHorizons.y) - acos(cosHorizons.x) + sinHorizons.x * cosHorizons.x - sinHorizons.y * cosHorizons.y);
float nyb = 1. / 2. * (2. - cosHorizons.x * cosHorizons.x - cosHorizons.y * cosHorizons.y);
float occlusion = nx * nxb + ny * nyb;
ao += occlusion;
}
ao = clamp(ao / float(DIRECTIONS), 0., 1.);
#if SCENE_CLIP_BOX == 1
ao = mix(ao, 1., smoothstep(0., radiusToUse, boxDistance));
#endif
ao = pow(ao, scale);
gl_FragColor = FRAGMENT_OUTPUT;
}`
};
/**
 * Debug/visualization pass for the GTAO depth input: linearizes the depth
 * buffer (for a perspective camera) and outputs it as an inverted grayscale
 * image (near = bright, far = dark).
 */
const GTAODepthShader = {
name: 'GTAODepthShader',
// PERSPECTIVE_CAMERA: 1 = linearize perspective depth, 0 = use depth as-is.
defines: {
PERSPECTIVE_CAMERA: 1
},
// tDepth: depth texture; cameraNear/cameraFar: clip planes for linearization.
uniforms: {
tDepth: { value: null },
cameraNear: { value: null },
cameraFar: { value: null },
},
// Pass-through vertex shader: forwards mesh UVs to the fragment stage.
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
// Fragment shader: converts fragment depth to orthographic [0,1] depth via
// the helpers from the three.js <packing> chunk, then inverts for display.
fragmentShader: /* glsl */`
uniform sampler2D tDepth;
uniform float cameraNear;
uniform float cameraFar;
varying vec2 vUv;
#include <packing>
float getLinearDepth( const in vec2 screenPosition ) {
#if PERSPECTIVE_CAMERA == 1
float fragCoordZ = texture2D( tDepth, screenPosition ).x;
float viewZ = perspectiveDepthToViewZ( fragCoordZ, cameraNear, cameraFar );
return viewZToOrthographicDepth( viewZ, cameraNear, cameraFar );
#else
return texture2D( tDepth, screenPosition ).x;
#endif
}
void main() {
float depth = getLinearDepth( vUv );
gl_FragColor = vec4( vec3( 1.0 - depth ), 1.0 );
}`
};
/**
 * Blend pass for composing the GTAO result over the scene: fades the AO
 * texture toward white by 'intensity' so it can be multiplied onto the
 * beauty pass (intensity 0 = no darkening, 1 = full AO).
 */
const GTAOBlendShader = {
name: 'GTAOBlendShader',
// tDiffuse: AO render target; intensity: AO strength in [0,1].
uniforms: {
tDiffuse: { value: null },
intensity: { value: 1.0 }
},
// Pass-through vertex shader: forwards mesh UVs to the fragment stage.
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
// Fragment shader: lerp from white to the AO color; alpha passes through.
fragmentShader: /* glsl */`
uniform float intensity;
uniform sampler2D tDiffuse;
varying vec2 vUv;
void main() {
vec4 texel = texture2D( tDiffuse, vUv );
gl_FragColor = vec4(mix(vec3(1.), texel.rgb, intensity), texel.a);
}`
};
/**
 * Builds a small repeating RGBA noise texture for the GTAO pass.
 *
 * Each texel encodes a unit rotation vector in the XY plane whose angle is
 * derived from a magic-square permutation, giving a well-distributed,
 * deterministic rotation pattern. Z is stored as the neutral value 127 and
 * alpha as 255.
 *
 * @param {number} [size=5] - Requested square size; even values are bumped
 *   to the next odd number (magic squares here require odd dimensions).
 * @returns {DataTexture} Repeat-wrapped noise texture of size noiseSize².
 */
function generateMagicSquareNoise( size = 5 ) {
const noiseSize = Math.floor( size ) % 2 === 0 ? Math.floor( size ) + 1 : Math.floor( size );
const magicSquare = generateMagicSquare( noiseSize );
const cellCount = magicSquare.length;
const data = new Uint8Array( cellCount * 4 );
for ( let k = 0; k < cellCount; k ++ ) {
// Map the magic-square value to an angle on the unit circle.
const angle = ( 2 * Math.PI * magicSquare[ k ] ) / cellCount;
const dir = new Vector3( Math.cos( angle ), Math.sin( angle ), 0 ).normalize();
// Pack the [-1,1] components into unsigned bytes.
data[ k * 4 + 0 ] = ( dir.x * 0.5 + 0.5 ) * 255;
data[ k * 4 + 1 ] = ( dir.y * 0.5 + 0.5 ) * 255;
data[ k * 4 + 2 ] = 127;
data[ k * 4 + 3 ] = 255;
}
const noiseTexture = new DataTexture( data, noiseSize, noiseSize );
noiseTexture.wrapS = RepeatWrapping;
noiseTexture.wrapT = RepeatWrapping;
noiseTexture.needsUpdate = true;
return noiseTexture;
}
/**
 * Generates an odd-order magic square via the Siamese (De la Loubère) method.
 *
 * The square is returned as a flat row-major array of length n*n containing
 * each value 1..n*n exactly once, with every row, column and diagonal summing
 * to the same constant. Even sizes are bumped to the next odd number.
 *
 * @param {number} size - Requested order; floored, and incremented if even.
 * @returns {number[]} Flat n*n magic square (row-major).
 */
function generateMagicSquare( size ) {
const n = Math.floor( size ) % 2 === 0 ? Math.floor( size ) + 1 : Math.floor( size );
const cellCount = n * n;
const square = new Array( cellCount ).fill( 0 );
// Start one column right of the bottom-center per this variant's convention.
let row = Math.floor( n / 2 );
let col = n - 1;
let value = 1;
while ( value <= cellCount ) {
if ( row === - 1 && col === n ) {
// Walked off the top-right corner: restart two columns back, top row.
col = n - 2;
row = 0;
} else {
// Wrap each axis independently when it leaves the grid.
if ( col === n ) col = 0;
if ( row < 0 ) row = n - 1;
}
if ( square[ row * n + col ] !== 0 ) {
// Collision: step down-left relative to the attempted move and retry.
col -= 2;
row ++;
} else {
square[ row * n + col ] = value ++;
// Normal move: up one row, right one column (diagonal walk).
col ++;
row --;
}
}
return square;
}
export { generateMagicSquareNoise, GTAOShader, GTAODepthShader, GTAOBlendShader };

View File

@ -0,0 +1,43 @@
/**
 * Gamma Correction Shader
 * http://en.wikipedia.org/wiki/gamma_correction
 *
 * Converts the linear-space input to sRGB using the sRGBTransferOETF helper
 * provided by the three.js shader environment. Typically used as the final
 * pass of an effect composer chain.
 */
const GammaCorrectionShader = {
name: 'GammaCorrectionShader',
// tDiffuse: linear-space input render target.
uniforms: {
'tDiffuse': { value: null }
},
// Pass-through vertex shader: forwards mesh UVs to the fragment stage.
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
// Fragment shader: single sample + OETF encode.
fragmentShader: /* glsl */`
uniform sampler2D tDiffuse;
varying vec2 vUv;
void main() {
vec4 tex = texture2D( tDiffuse, vUv );
gl_FragColor = sRGBTransferOETF( tex );
}`
};
export { GammaCorrectionShader };

View File

@ -0,0 +1,321 @@
import {
Color,
Vector3
} from 'three';
/**
 * God-rays (crepuscular rays)
 *
 * Similar implementation to the one used by Crytek for CryEngine 2 [Sousa2008].
 * Blurs a mask generated from the depth map along radial lines emanating from the light
 * source. The blur repeatedly applies a blur filter of increasing support but constant
 * sample count to produce a blur filter with large support.
 *
 * My implementation performs 3 passes, similar to the implementation from Sousa. I found
 * just 6 samples per pass produced acceptible results. The blur is applied three times,
 * with decreasing filter support. The result is equivalent to a single pass with
 * 6*6*6 = 216 samples.
 *
 * References:
 *
 * Sousa2008 - Crysis Next Gen Effects, GDC2008, http://www.crytek.com/sites/default/files/GDC08_SousaT_CrysisEffects.ppt
 */
// First stage: inverts the depth-material render (foreground white /
// background black) so occluders become dark in the light mask.
const GodRaysDepthMaskShader = {
name: 'GodRaysDepthMaskShader',
// tInput: depth-material render of the scene.
uniforms: {
tInput: {
value: null
}
},
// Pass-through vertex shader: forwards mesh UVs to the fragment stage.
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
// Fragment shader: simple per-channel inversion of the input.
fragmentShader: /* glsl */`
varying vec2 vUv;
uniform sampler2D tInput;
void main() {
gl_FragColor = vec4( 1.0 ) - texture2D( tInput, vUv );
}`
};
/**
 * The god-ray generation shader.
 *
 * First pass:
 *
 * The depth map is blurred along radial lines towards the "sun". The
 * output is written to a temporary render target (I used a 1/4 sized
 * target).
 *
 * Pass two & three:
 *
 * The results of the previous pass are re-blurred, each time with a
 * decreased distance between samples.
 */
const GodRaysGenerateShader = {
name: 'GodRaysGenerateShader',
// tInput:                   mask (or previous pass output) to blur.
// fStepSize:                UV-space distance between successive taps.
// vSunPositionScreenSpace:  sun position; xy in screen UV space, z used to
//                           fade the effect out (see 'f' in the shader).
uniforms: {
tInput: {
value: null
},
fStepSize: {
value: 1.0
},
vSunPositionScreenSpace: {
value: new Vector3()
}
},
// Pass-through vertex shader: forwards mesh UVs to the fragment stage.
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
// Fragment shader: steps TAPS_PER_PASS times from the pixel toward the sun,
// accumulating the red channel; the loop is manually unrolled for ANGLE
// compatibility (see the commented-out loop version inside).
fragmentShader: /* glsl */`
#define TAPS_PER_PASS 6.0
varying vec2 vUv;
uniform sampler2D tInput;
uniform vec3 vSunPositionScreenSpace;
uniform float fStepSize; // filter step size
void main() {
// delta from current pixel to "sun" position
vec2 delta = vSunPositionScreenSpace.xy - vUv;
float dist = length( delta );
// Step vector (uv space)
vec2 stepv = fStepSize * delta / dist;
// Number of iterations between pixel and sun
float iters = dist/fStepSize;
vec2 uv = vUv.xy;
float col = 0.0;
// This breaks ANGLE in Chrome 22
// - see http://code.google.com/p/chromium/issues/detail?id=153105
/*
// Unrolling didnt do much on my hardware (ATI Mobility Radeon 3450),
// so i've just left the loop
"for ( float i = 0.0; i < TAPS_PER_PASS; i += 1.0 ) {",
// Accumulate samples, making sure we dont walk past the light source.
// The check for uv.y < 1 would not be necessary with "border" UV wrap
// mode, with a black border color. I don't think this is currently
// exposed by three.js. As a result there might be artifacts when the
// sun is to the left, right or bottom of screen as these cases are
// not specifically handled.
" col += ( i <= iters && uv.y < 1.0 ? texture2D( tInput, uv ).r : 0.0 );",
" uv += stepv;",
"}",
*/
// Unrolling loop manually makes it work in ANGLE
float f = min( 1.0, max( vSunPositionScreenSpace.z / 1000.0, 0.0 ) ); // used to fade out godrays
if ( 0.0 <= iters && uv.y < 1.0 ) col += texture2D( tInput, uv ).r * f;
uv += stepv;
if ( 1.0 <= iters && uv.y < 1.0 ) col += texture2D( tInput, uv ).r * f;
uv += stepv;
if ( 2.0 <= iters && uv.y < 1.0 ) col += texture2D( tInput, uv ).r * f;
uv += stepv;
if ( 3.0 <= iters && uv.y < 1.0 ) col += texture2D( tInput, uv ).r * f;
uv += stepv;
if ( 4.0 <= iters && uv.y < 1.0 ) col += texture2D( tInput, uv ).r * f;
uv += stepv;
if ( 5.0 <= iters && uv.y < 1.0 ) col += texture2D( tInput, uv ).r * f;
uv += stepv;
// Should technically be dividing by 'iters but 'TAPS_PER_PASS' smooths out
// objectionable artifacts, in particular near the sun position. The side
// effect is that the result is darker than it should be around the sun, as
// TAPS_PER_PASS is greater than the number of samples actually accumulated.
// When the result is inverted (in the shader 'godrays_combine this produces
// a slight bright spot at the position of the sun, even when it is occluded.
gl_FragColor = vec4( col/TAPS_PER_PASS );
gl_FragColor.a = 1.0;
}`
};
/**
 * Additively applies god rays from texture tGodRays to a background (tColors).
 * fGodRayIntensity attenuates the god rays.
 */
const GodRaysCombineShader = {
name: 'GodRaysCombineShader',
// tColors:          scene beauty pass.
// tGodRays:         blurred god-ray mask (red channel used).
// fGodRayIntensity: ray brightness multiplier.
uniforms: {
tColors: {
value: null
},
tGodRays: {
value: null
},
fGodRayIntensity: {
value: 0.69
}
},
// Pass-through vertex shader: forwards mesh UVs to the fragment stage.
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
// Fragment shader: inverts the ray mask (depth material renders foreground
// white) and adds it, scaled by intensity, onto the scene color.
fragmentShader: /* glsl */`
varying vec2 vUv;
uniform sampler2D tColors;
uniform sampler2D tGodRays;
uniform float fGodRayIntensity;
void main() {
// Since THREE.MeshDepthMaterial renders foreground objects white and background
// objects black, the god-rays will be white streaks. Therefore value is inverted
// before being combined with tColors
gl_FragColor = texture2D( tColors, vUv ) + fGodRayIntensity * vec4( 1.0 - texture2D( tGodRays, vUv ).r );
gl_FragColor.a = 1.0;
}`
};
/**
 * A dodgy sun/sky shader. Makes a bright spot at the sun location. Would be
 * cheaper/faster/simpler to implement this as a simple sun sprite.
 */
const GodRaysFakeSunShader = {
name: 'GodRaysFakeSunShader',
// vSunPositionScreenSpace: sun position; xy in screen UV space, z > 0 means
//                          the sun faces the camera.
// fAspect:                 viewport aspect ratio for circular falloff.
// sunColor / bgColor:      disc and sky colors.
uniforms: {
vSunPositionScreenSpace: {
value: new Vector3()
},
fAspect: {
value: 1.0
},
sunColor: {
value: new Color( 0xffee00 )
},
bgColor: {
value: new Color( 0x000000 )
}
},
// Pass-through vertex shader: forwards mesh UVs to the fragment stage.
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
// Fragment shader: cubic falloff of sunColor into bgColor with the distance
// from the sun position; pure background when the sun is behind the camera.
fragmentShader: /* glsl */`
varying vec2 vUv;
uniform vec3 vSunPositionScreenSpace;
uniform float fAspect;
uniform vec3 sunColor;
uniform vec3 bgColor;
void main() {
vec2 diff = vUv - vSunPositionScreenSpace.xy;
// Correct for aspect ratio
diff.x *= fAspect;
float prop = clamp( length( diff ) / 0.5, 0.0, 1.0 );
prop = 0.35 * pow( 1.0 - prop, 3.0 );
gl_FragColor.xyz = ( vSunPositionScreenSpace.z > 0.0 ) ? mix( sunColor, bgColor, 1.0 - prop ) : bgColor;
gl_FragColor.w = 1.0;
}`
};
export { GodRaysDepthMaskShader, GodRaysGenerateShader, GodRaysCombineShader, GodRaysFakeSunShader };

View File

@ -0,0 +1,312 @@
/**
* RGB Halftone shader for three.js.
* Simulates CMYK-style halftone printing with a separate rotated dot grid per
* RGB channel, multi-sampled and blended back over the source image.
* NOTE:
* Shape (1 = Dot, 2 = Ellipse, 3 = Line, 4 = Square)
* Blending Mode (1 = Linear, 2 = Multiply, 3 = Add, 4 = Lighter, 5 = Darker)
*/
const HalftoneShader = {
name: 'HalftoneShader',
uniforms: {
// input render target
'tDiffuse': { value: null },
// dot shape, see NOTE above
'shape': { value: 1 },
// halftone grid cell size in pixels
'radius': { value: 4 },
// per-channel grid rotation angles (radians)
'rotateR': { value: Math.PI / 12 * 1 },
'rotateG': { value: Math.PI / 12 * 2 },
'rotateB': { value: Math.PI / 12 * 3 },
// random per-dot position jitter, 0 = regular grid
'scatter': { value: 0 },
// render target size in pixels
'width': { value: 1 },
'height': { value: 1 },
// blend factor between halftone result and original image
'blending': { value: 1 },
// blending mode, see NOTE above
'blendingMode': { value: 1 },
// average the channels to greyscale after blending
'greyscale': { value: false },
// bypass the effect entirely
'disable': { value: false }
},
// pass-through vertex shader: forwards UVs for the full-screen pass
vertexShader: /* glsl */`
varying vec2 vUV;
void main() {
vUV = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
}`,
fragmentShader: /* glsl */`
#define SQRT2_MINUS_ONE 0.41421356
#define SQRT2_HALF_MINUS_ONE 0.20710678
#define PI2 6.28318531
#define SHAPE_DOT 1
#define SHAPE_ELLIPSE 2
#define SHAPE_LINE 3
#define SHAPE_SQUARE 4
#define BLENDING_LINEAR 1
#define BLENDING_MULTIPLY 2
#define BLENDING_ADD 3
#define BLENDING_LIGHTER 4
#define BLENDING_DARKER 5
uniform sampler2D tDiffuse;
uniform float radius;
uniform float rotateR;
uniform float rotateG;
uniform float rotateB;
uniform float scatter;
uniform float width;
uniform float height;
uniform int shape;
uniform bool disable;
uniform float blending;
uniform int blendingMode;
varying vec2 vUV;
uniform bool greyscale;
const int samples = 8;
float blend( float a, float b, float t ) {
// linear blend
return a * ( 1.0 - t ) + b * t;
}
float hypot( float x, float y ) {
// vector magnitude
return sqrt( x * x + y * y );
}
float rand( vec2 seed ){
// get pseudo-random number
return fract( sin( dot( seed.xy, vec2( 12.9898, 78.233 ) ) ) * 43758.5453 );
}
float distanceToDotRadius( float channel, vec2 coord, vec2 normal, vec2 p, float angle, float rad_max ) {
// apply shape-specific transforms
float dist = hypot( coord.x - p.x, coord.y - p.y );
float rad = channel;
if ( shape == SHAPE_DOT ) {
rad = pow( abs( rad ), 1.125 ) * rad_max;
} else if ( shape == SHAPE_ELLIPSE ) {
rad = pow( abs( rad ), 1.125 ) * rad_max;
if ( dist != 0.0 ) {
float dot_p = abs( ( p.x - coord.x ) / dist * normal.x + ( p.y - coord.y ) / dist * normal.y );
dist = ( dist * ( 1.0 - SQRT2_HALF_MINUS_ONE ) ) + dot_p * dist * SQRT2_MINUS_ONE;
}
} else if ( shape == SHAPE_LINE ) {
rad = pow( abs( rad ), 1.5) * rad_max;
float dot_p = ( p.x - coord.x ) * normal.x + ( p.y - coord.y ) * normal.y;
dist = hypot( normal.x * dot_p, normal.y * dot_p );
} else if ( shape == SHAPE_SQUARE ) {
float theta = atan( p.y - coord.y, p.x - coord.x ) - angle;
float sin_t = abs( sin( theta ) );
float cos_t = abs( cos( theta ) );
rad = pow( abs( rad ), 1.4 );
rad = rad_max * ( rad + ( ( sin_t > cos_t ) ? rad - sin_t * rad : rad - cos_t * rad ) );
}
return rad - dist;
}
struct Cell {
// grid sample positions
vec2 normal;
vec2 p1;
vec2 p2;
vec2 p3;
vec2 p4;
float samp2;
float samp1;
float samp3;
float samp4;
};
vec4 getSample( vec2 point ) {
// multi-sampled point
vec4 tex = texture2D( tDiffuse, vec2( point.x / width, point.y / height ) );
float base = rand( vec2( floor( point.x ), floor( point.y ) ) ) * PI2;
float step = PI2 / float( samples );
float dist = radius * 0.66;
for ( int i = 0; i < samples; ++i ) {
float r = base + step * float( i );
vec2 coord = point + vec2( cos( r ) * dist, sin( r ) * dist );
tex += texture2D( tDiffuse, vec2( coord.x / width, coord.y / height ) );
}
tex /= float( samples ) + 1.0;
return tex;
}
float getDotColour( Cell c, vec2 p, int channel, float angle, float aa ) {
// get colour for given point
float dist_c_1, dist_c_2, dist_c_3, dist_c_4, res;
if ( channel == 0 ) {
c.samp1 = getSample( c.p1 ).r;
c.samp2 = getSample( c.p2 ).r;
c.samp3 = getSample( c.p3 ).r;
c.samp4 = getSample( c.p4 ).r;
} else if (channel == 1) {
c.samp1 = getSample( c.p1 ).g;
c.samp2 = getSample( c.p2 ).g;
c.samp3 = getSample( c.p3 ).g;
c.samp4 = getSample( c.p4 ).g;
} else {
c.samp1 = getSample( c.p1 ).b;
c.samp3 = getSample( c.p3 ).b;
c.samp2 = getSample( c.p2 ).b;
c.samp4 = getSample( c.p4 ).b;
}
dist_c_1 = distanceToDotRadius( c.samp1, c.p1, c.normal, p, angle, radius );
dist_c_2 = distanceToDotRadius( c.samp2, c.p2, c.normal, p, angle, radius );
dist_c_3 = distanceToDotRadius( c.samp3, c.p3, c.normal, p, angle, radius );
dist_c_4 = distanceToDotRadius( c.samp4, c.p4, c.normal, p, angle, radius );
res = ( dist_c_1 > 0.0 ) ? clamp( dist_c_1 / aa, 0.0, 1.0 ) : 0.0;
res += ( dist_c_2 > 0.0 ) ? clamp( dist_c_2 / aa, 0.0, 1.0 ) : 0.0;
res += ( dist_c_3 > 0.0 ) ? clamp( dist_c_3 / aa, 0.0, 1.0 ) : 0.0;
res += ( dist_c_4 > 0.0 ) ? clamp( dist_c_4 / aa, 0.0, 1.0 ) : 0.0;
res = clamp( res, 0.0, 1.0 );
return res;
}
Cell getReferenceCell( vec2 p, vec2 origin, float grid_angle, float step ) {
// get containing cell
Cell c;
// calc grid
vec2 n = vec2( cos( grid_angle ), sin( grid_angle ) );
float threshold = step * 0.5;
float dot_normal = n.x * ( p.x - origin.x ) + n.y * ( p.y - origin.y );
float dot_line = -n.y * ( p.x - origin.x ) + n.x * ( p.y - origin.y );
vec2 offset = vec2( n.x * dot_normal, n.y * dot_normal );
float offset_normal = mod( hypot( offset.x, offset.y ), step );
float normal_dir = ( dot_normal < 0.0 ) ? 1.0 : -1.0;
float normal_scale = ( ( offset_normal < threshold ) ? -offset_normal : step - offset_normal ) * normal_dir;
float offset_line = mod( hypot( ( p.x - offset.x ) - origin.x, ( p.y - offset.y ) - origin.y ), step );
float line_dir = ( dot_line < 0.0 ) ? 1.0 : -1.0;
float line_scale = ( ( offset_line < threshold ) ? -offset_line : step - offset_line ) * line_dir;
// get closest corner
c.normal = n;
c.p1.x = p.x - n.x * normal_scale + n.y * line_scale;
c.p1.y = p.y - n.y * normal_scale - n.x * line_scale;
// scatter
if ( scatter != 0.0 ) {
float off_mag = scatter * threshold * 0.5;
float off_angle = rand( vec2( floor( c.p1.x ), floor( c.p1.y ) ) ) * PI2;
c.p1.x += cos( off_angle ) * off_mag;
c.p1.y += sin( off_angle ) * off_mag;
}
// find corners
float normal_step = normal_dir * ( ( offset_normal < threshold ) ? step : -step );
float line_step = line_dir * ( ( offset_line < threshold ) ? step : -step );
c.p2.x = c.p1.x - n.x * normal_step;
c.p2.y = c.p1.y - n.y * normal_step;
c.p3.x = c.p1.x + n.y * line_step;
c.p3.y = c.p1.y - n.x * line_step;
c.p4.x = c.p1.x - n.x * normal_step + n.y * line_step;
c.p4.y = c.p1.y - n.y * normal_step - n.x * line_step;
return c;
}
float blendColour( float a, float b, float t ) {
// blend colours
if ( blendingMode == BLENDING_LINEAR ) {
return blend( a, b, 1.0 - t );
} else if ( blendingMode == BLENDING_ADD ) {
return blend( a, min( 1.0, a + b ), t );
} else if ( blendingMode == BLENDING_MULTIPLY ) {
return blend( a, max( 0.0, a * b ), t );
} else if ( blendingMode == BLENDING_LIGHTER ) {
return blend( a, max( a, b ), t );
} else if ( blendingMode == BLENDING_DARKER ) {
return blend( a, min( a, b ), t );
} else {
return blend( a, b, 1.0 - t );
}
}
void main() {
if ( ! disable ) {
// setup
vec2 p = vec2( vUV.x * width, vUV.y * height );
vec2 origin = vec2( 0, 0 );
float aa = ( radius < 2.5 ) ? radius * 0.5 : 1.25;
// get channel samples
Cell cell_r = getReferenceCell( p, origin, rotateR, radius );
Cell cell_g = getReferenceCell( p, origin, rotateG, radius );
Cell cell_b = getReferenceCell( p, origin, rotateB, radius );
float r = getDotColour( cell_r, p, 0, rotateR, aa );
float g = getDotColour( cell_g, p, 1, rotateG, aa );
float b = getDotColour( cell_b, p, 2, rotateB, aa );
// blend with original
vec4 colour = texture2D( tDiffuse, vUV );
r = blendColour( r, colour.r, blending );
g = blendColour( g, colour.g, blending );
b = blendColour( b, colour.b, blending );
if ( greyscale ) {
r = g = b = (r + b + g) / 3.0;
}
gl_FragColor = vec4( r, g, b, 1.0 );
} else {
gl_FragColor = texture2D( tDiffuse, vUV );
}
}`
};
export { HalftoneShader };

View File

@ -0,0 +1,59 @@
/**
* Two pass Gaussian blur filter (horizontal and vertical blur shaders)
* - see http://www.cake23.de/traveling-wavefronts-lit-up.html
*
* - 9 samples per pass
* - standard deviation 2.7
* - "h" and "v" parameters should be set to "1 / width" and "1 / height"
*/
const HorizontalBlurShader = {
name: 'HorizontalBlurShader',
uniforms: {
'tDiffuse': { value: null },
'h': { value: 1.0 / 512.0 }
},
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
uniform sampler2D tDiffuse;
uniform float h;
varying vec2 vUv;
void main() {
vec4 sum = vec4( 0.0 );
sum += texture2D( tDiffuse, vec2( vUv.x - 4.0 * h, vUv.y ) ) * 0.051;
sum += texture2D( tDiffuse, vec2( vUv.x - 3.0 * h, vUv.y ) ) * 0.0918;
sum += texture2D( tDiffuse, vec2( vUv.x - 2.0 * h, vUv.y ) ) * 0.12245;
sum += texture2D( tDiffuse, vec2( vUv.x - 1.0 * h, vUv.y ) ) * 0.1531;
sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y ) ) * 0.1633;
sum += texture2D( tDiffuse, vec2( vUv.x + 1.0 * h, vUv.y ) ) * 0.1531;
sum += texture2D( tDiffuse, vec2( vUv.x + 2.0 * h, vUv.y ) ) * 0.12245;
sum += texture2D( tDiffuse, vec2( vUv.x + 3.0 * h, vUv.y ) ) * 0.0918;
sum += texture2D( tDiffuse, vec2( vUv.x + 4.0 * h, vUv.y ) ) * 0.051;
gl_FragColor = sum;
}`
};
export { HorizontalBlurShader };

View File

@ -0,0 +1,63 @@
/**
* Simple fake tilt-shift effect, modulating two pass Gaussian blur (see above) by vertical position
*
* - 9 samples per pass
* - standard deviation 2.7
* - "h" and "v" parameters should be set to "1 / width" and "1 / height"
* - "r" parameter control where "focused" horizontal line lies
*/
const HorizontalTiltShiftShader = {
name: 'HorizontalTiltShiftShader',
uniforms: {
'tDiffuse': { value: null },
'h': { value: 1.0 / 512.0 },
'r': { value: 0.35 }
},
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
uniform sampler2D tDiffuse;
uniform float h;
uniform float r;
varying vec2 vUv;
void main() {
vec4 sum = vec4( 0.0 );
float hh = h * abs( r - vUv.y );
sum += texture2D( tDiffuse, vec2( vUv.x - 4.0 * hh, vUv.y ) ) * 0.051;
sum += texture2D( tDiffuse, vec2( vUv.x - 3.0 * hh, vUv.y ) ) * 0.0918;
sum += texture2D( tDiffuse, vec2( vUv.x - 2.0 * hh, vUv.y ) ) * 0.12245;
sum += texture2D( tDiffuse, vec2( vUv.x - 1.0 * hh, vUv.y ) ) * 0.1531;
sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y ) ) * 0.1633;
sum += texture2D( tDiffuse, vec2( vUv.x + 1.0 * hh, vUv.y ) ) * 0.1531;
sum += texture2D( tDiffuse, vec2( vUv.x + 2.0 * hh, vUv.y ) ) * 0.12245;
sum += texture2D( tDiffuse, vec2( vUv.x + 3.0 * hh, vUv.y ) ) * 0.0918;
sum += texture2D( tDiffuse, vec2( vUv.x + 4.0 * hh, vUv.y ) ) * 0.051;
gl_FragColor = sum;
}`
};
export { HorizontalTiltShiftShader };

View File

@ -0,0 +1,67 @@
/**
* Hue and saturation adjustment
* https://github.com/evanw/glfx.js
* hue: -1 to 1 (-1 is 180 degrees in the negative direction, 0 is no change, etc.
* saturation: -1 to 1 (-1 is solid gray, 0 is no change, and 1 is maximum contrast)
*/
const HueSaturationShader = {
name: 'HueSaturationShader',
uniforms: {
'tDiffuse': { value: null },
'hue': { value: 0 },
'saturation': { value: 0 }
},
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
uniform sampler2D tDiffuse;
uniform float hue;
uniform float saturation;
varying vec2 vUv;
void main() {
gl_FragColor = texture2D( tDiffuse, vUv );
// hue
float angle = hue * 3.14159265;
float s = sin(angle), c = cos(angle);
vec3 weights = (vec3(2.0 * c, -sqrt(3.0) * s - c, sqrt(3.0) * s - c) + 1.0) / 3.0;
float len = length(gl_FragColor.rgb);
gl_FragColor.rgb = vec3(
dot(gl_FragColor.rgb, weights.xyz),
dot(gl_FragColor.rgb, weights.zxy),
dot(gl_FragColor.rgb, weights.yzx)
);
// saturation
float average = (gl_FragColor.r + gl_FragColor.g + gl_FragColor.b) / 3.0;
if (saturation > 0.0) {
gl_FragColor.rgb += (average - gl_FragColor.rgb) * (1.0 - 1.0 / (1.001 - saturation));
} else {
gl_FragColor.rgb += (average - gl_FragColor.rgb) * (-saturation);
}
}`
};
export { HueSaturationShader };

View File

@ -0,0 +1,58 @@
/**
* Kaleidoscope Shader
* Radial reflection around center point
* Ported from: http://pixelshaders.com/editor/
* by Toby Schachman / http://tobyschachman.com/
*
* sides: number of reflections
* angle: initial angle in radians
*/
const KaleidoShader = {
name: 'KaleidoShader',
uniforms: {
'tDiffuse': { value: null },
'sides': { value: 6.0 },
'angle': { value: 0.0 }
},
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
uniform sampler2D tDiffuse;
uniform float sides;
uniform float angle;
varying vec2 vUv;
void main() {
vec2 p = vUv - 0.5;
float r = length(p);
float a = atan(p.y, p.x) + angle;
float tau = 2. * 3.1416 ;
a = mod(a, tau/sides);
a = abs(a - tau/sides/2.) ;
p = r * vec2(cos(a), sin(a));
vec4 color = texture2D(tDiffuse, p + 0.5);
gl_FragColor = color;
}`
};
export { KaleidoShader };

View File

@ -0,0 +1,66 @@
import {
Color
} from 'three';
/**
* Luminosity
* http://en.wikipedia.org/wiki/Luminosity
*/
const LuminosityHighPassShader = {
name: 'LuminosityHighPassShader',
shaderID: 'luminosityHighPass',
uniforms: {
'tDiffuse': { value: null },
'luminosityThreshold': { value: 1.0 },
'smoothWidth': { value: 1.0 },
'defaultColor': { value: new Color( 0x000000 ) },
'defaultOpacity': { value: 0.0 }
},
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
uniform sampler2D tDiffuse;
uniform vec3 defaultColor;
uniform float defaultOpacity;
uniform float luminosityThreshold;
uniform float smoothWidth;
varying vec2 vUv;
void main() {
vec4 texel = texture2D( tDiffuse, vUv );
vec3 luma = vec3( 0.299, 0.587, 0.114 );
float v = dot( texel.xyz, luma );
vec4 outputColor = vec4( defaultColor.rgb, defaultOpacity );
float alpha = smoothstep( luminosityThreshold, luminosityThreshold + smoothWidth, v );
gl_FragColor = mix( outputColor, texel, alpha );
}`
};
export { LuminosityHighPassShader };

View File

@ -0,0 +1,48 @@
/**
* Luminosity
* http://en.wikipedia.org/wiki/Luminosity
*/
const LuminosityShader = {
name: 'LuminosityShader',
uniforms: {
'tDiffuse': { value: null }
},
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
#include <common>
uniform sampler2D tDiffuse;
varying vec2 vUv;
void main() {
vec4 texel = texture2D( tDiffuse, vUv );
float l = luminance( texel.rgb );
gl_FragColor = vec4( l, l, l, texel.w );
}`
};
export { LuminosityShader };

View File

@ -0,0 +1,134 @@
/**
* MMD Toon Shader
*
* This shader is extended from MeshPhongMaterial, and merged algorithms with
* MeshToonMaterial and MeshMetcapMaterial.
* Ideas came from https://github.com/mrdoob/three.js/issues/19609
*
* Combining steps:
* * Declare matcap uniform.
* * Add gradientmap_pars_fragment.
* * Use gradient irradiances instead of dotNL irradiance from MeshPhongMaterial.
* (Replace lights_phong_pars_fragment with lights_mmd_toon_pars_fragment)
* * Add mmd_toon_matcap_fragment.
*/
import { UniformsUtils, ShaderLib } from 'three';
// GLSL chunk substituted for <lights_phong_pars_fragment> in MMDToonShader:
// keeps the Blinn-Phong material struct but replaces the Phong dotNL diffuse
// term with toon gradient irradiance (getGradientIrradiance).
const lights_mmd_toon_pars_fragment = /* glsl */`
varying vec3 vViewPosition;
struct BlinnPhongMaterial {
vec3 diffuseColor;
vec3 specularColor;
float specularShininess;
float specularStrength;
};
void RE_Direct_BlinnPhong( const in IncidentLight directLight, const in vec3 geometryPosition, const in vec3 geometryNormal, const in vec3 geometryViewDir, const in vec3 geometryClearcoatNormal, const in BlinnPhongMaterial material, inout ReflectedLight reflectedLight ) {
vec3 irradiance = getGradientIrradiance( geometryNormal, directLight.direction ) * directLight.color;
reflectedLight.directDiffuse += irradiance * BRDF_Lambert( material.diffuseColor );
reflectedLight.directSpecular += irradiance * BRDF_BlinnPhong( directLight.direction, geometryViewDir, geometryNormal, material.specularColor, material.specularShininess ) * material.specularStrength;
}
void RE_IndirectDiffuse_BlinnPhong( const in vec3 irradiance, const in vec3 geometryPosition, const in vec3 geometryNormal, const in vec3 geometryViewDir, const in vec3 geometryClearcoatNormal, const in BlinnPhongMaterial material, inout ReflectedLight reflectedLight ) {
reflectedLight.indirectDiffuse += irradiance * BRDF_Lambert( material.diffuseColor );
}
#define RE_Direct RE_Direct_BlinnPhong
#define RE_IndirectDiffuse RE_IndirectDiffuse_BlinnPhong
`;
// GLSL chunk substituted for <envmap_fragment> in MMDToonShader: samples the
// matcap texture in view space and multiplies or adds it onto outgoingLight,
// depending on the MATCAP_BLENDING_* define.
const mmd_toon_matcap_fragment = /* glsl */`
#ifdef USE_MATCAP
vec3 viewDir = normalize( vViewPosition );
vec3 x = normalize( vec3( viewDir.z, 0.0, - viewDir.x ) );
vec3 y = cross( viewDir, x );
vec2 uv = vec2( dot( x, normal ), dot( y, normal ) ) * 0.495 + 0.5; // 0.495 to remove artifacts caused by undersized matcap disks
vec4 matcapColor = texture2D( matcap, uv );
#ifdef MATCAP_BLENDING_MULTIPLY
outgoingLight *= matcapColor.rgb;
#elif defined( MATCAP_BLENDING_ADD )
outgoingLight += matcapColor.rgb;
#endif
#endif
`;
// MMD Toon shader assembled by patching the built-in Phong shader sources:
// env-map chunks are removed, the Phong lighting chunk is swapped for the toon
// gradient variant above, and matcap sampling is spliced in at the end.
// NOTE(review): the replace() calls depend on the exact chunk include strings
// of the bundled three.js version — verify after upgrading three.
const MMDToonShader = {
name: 'MMDToonShader',
defines: {
TOON: true,
MATCAP: true,
MATCAP_BLENDING_ADD: true,
},
// union of toon, phong and matcap uniforms so all patched chunks resolve
uniforms: UniformsUtils.merge( [
ShaderLib.toon.uniforms,
ShaderLib.phong.uniforms,
ShaderLib.matcap.uniforms,
] ),
vertexShader:
ShaderLib.phong.vertexShader
// strip env-map support from the vertex stage
.replace(
'#include <envmap_pars_vertex>',
''
)
.replace(
'#include <envmap_vertex>',
''
),
fragmentShader:
ShaderLib.phong.fragmentShader
// declare the matcap sampler ahead of <common>
.replace(
'#include <common>',
`
#ifdef USE_MATCAP
uniform sampler2D matcap;
#endif
#include <common>
`
)
// pull in the toon gradient map helpers instead of env-map pars
.replace(
'#include <envmap_common_pars_fragment>',
`
#include <gradientmap_pars_fragment>
`
)
.replace(
'#include <envmap_pars_fragment>',
''
)
// toon irradiance instead of Phong dotNL irradiance
.replace(
'#include <lights_phong_pars_fragment>',
lights_mmd_toon_pars_fragment
)
// apply matcap blending where env-map application used to happen
.replace(
'#include <envmap_fragment>',
`
${mmd_toon_matcap_fragment}
`
)
};
export { MMDToonShader };

View File

@ -0,0 +1,56 @@
/**
* Mirror Shader
* Copies half the input to the other half
*
* side: side of input to mirror (0 = left, 1 = right, 2 = top, 3 = bottom)
*/
const MirrorShader = {
name: 'MirrorShader',
uniforms: {
'tDiffuse': { value: null },
'side': { value: 1 }
},
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
uniform sampler2D tDiffuse;
uniform int side;
varying vec2 vUv;
void main() {
vec2 p = vUv;
if (side == 0){
if (p.x > 0.5) p.x = 1.0 - p.x;
}else if (side == 1){
if (p.x < 0.5) p.x = 1.0 - p.x;
}else if (side == 2){
if (p.y < 0.5) p.y = 1.0 - p.y;
}else if (side == 3){
if (p.y > 0.5) p.y = 1.0 - p.y;
}
vec4 color = texture2D(tDiffuse, p);
gl_FragColor = color;
}`
};
export { MirrorShader };

View File

@ -0,0 +1,55 @@
import {
Vector2
} from 'three';
/**
* Normal map shader
* - compute normals from heightmap
*/
const NormalMapShader = {
name: 'NormalMapShader',
uniforms: {
'heightMap': { value: null },
'resolution': { value: new Vector2( 512, 512 ) },
'scale': { value: new Vector2( 1, 1 ) },
'height': { value: 0.05 }
},
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
uniform float height;
uniform vec2 resolution;
uniform sampler2D heightMap;
varying vec2 vUv;
void main() {
float val = texture2D( heightMap, vUv ).x;
float valU = texture2D( heightMap, vUv + vec2( 1.0 / resolution.x, 0.0 ) ).x;
float valV = texture2D( heightMap, vUv + vec2( 0.0, 1.0 / resolution.y ) ).x;
gl_FragColor = vec4( ( 0.5 * normalize( vec3( val - valU, val - valV, height ) ) + 0.5 ), 1.0 );
}`
};
export { NormalMapShader };

View File

@ -0,0 +1,85 @@
const OutputShader = {
name: 'OutputShader',
uniforms: {
'tDiffuse': { value: null },
'toneMappingExposure': { value: 1 }
},
vertexShader: /* glsl */`
precision highp float;
uniform mat4 modelViewMatrix;
uniform mat4 projectionMatrix;
attribute vec3 position;
attribute vec2 uv;
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
precision highp float;
uniform sampler2D tDiffuse;
#include <tonemapping_pars_fragment>
#include <colorspace_pars_fragment>
varying vec2 vUv;
void main() {
gl_FragColor = texture2D( tDiffuse, vUv );
// tone mapping
#ifdef LINEAR_TONE_MAPPING
gl_FragColor.rgb = LinearToneMapping( gl_FragColor.rgb );
#elif defined( REINHARD_TONE_MAPPING )
gl_FragColor.rgb = ReinhardToneMapping( gl_FragColor.rgb );
#elif defined( CINEON_TONE_MAPPING )
gl_FragColor.rgb = OptimizedCineonToneMapping( gl_FragColor.rgb );
#elif defined( ACES_FILMIC_TONE_MAPPING )
gl_FragColor.rgb = ACESFilmicToneMapping( gl_FragColor.rgb );
#elif defined( AGX_TONE_MAPPING )
gl_FragColor.rgb = AgXToneMapping( gl_FragColor.rgb );
#elif defined( NEUTRAL_TONE_MAPPING )
gl_FragColor.rgb = NeutralToneMapping( gl_FragColor.rgb );
#endif
// color space
#ifdef SRGB_TRANSFER
gl_FragColor = sRGBTransferOETF( gl_FragColor );
#endif
}`
};
export { OutputShader };

View File

@ -0,0 +1,226 @@
import {
Matrix4,
Vector2,
Vector3,
} from 'three';
/**
* References:
* https://openaccess.thecvf.com/content/WACV2021/papers/Khademi_Self-Supervised_Poisson-Gaussian_Denoising_WACV_2021_paper.pdf
* https://arxiv.org/pdf/2206.01856.pdf
*
* Edge-preserving Poisson-disk denoiser: averages rotated disk samples,
* weighting each by luma, depth and normal similarity to the center texel.
*/
const PoissonDenoiseShader = {
name: 'PoissonDenoiseShader',
defines: {
// number of disk samples per fragment; must match SAMPLE_VECTORS length
'SAMPLES': 16,
// GLSL initializer generated by generatePdSamplePointInitializer below
'SAMPLE_VECTORS': generatePdSamplePointInitializer( 16, 2, 1 ),
// 0 = derive normal from depth, 1 = unpack RGB normal, 2 = raw normal texture
'NORMAL_VECTOR_TYPE': 1,
// 0 = depth in red channel, 1 = depth in alpha channel
'DEPTH_VALUE_SOURCE': 0,
},
uniforms: {
// input render target to denoise
'tDiffuse': { value: null },
// view-space normals (interpretation set by NORMAL_VECTOR_TYPE)
'tNormal': { value: null },
// depth texture (channel set by DEPTH_VALUE_SOURCE)
'tDepth': { value: null },
// tiled blue-noise texture used to rotate the sample disk per fragment
'tNoise': { value: null },
// render-target size in pixels
'resolution': { value: new Vector2() },
// for reconstructing view-space positions from depth
'cameraProjectionMatrixInverse': { value: new Matrix4() },
// edge-stopping sharpness for luma / depth / normal differences
'lumaPhi': { value: 5. },
'depthPhi': { value: 5. },
'normalPhi': { value: 5. },
// sample disk radius in pixels
'radius': { value: 4. },
// iteration index: selects the noise channel for this pass
'index': { value: 0 }
},
// pass-through vertex shader: forwards UVs for the full-screen pass
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
varying vec2 vUv;
uniform sampler2D tDiffuse;
uniform sampler2D tNormal;
uniform sampler2D tDepth;
uniform sampler2D tNoise;
uniform vec2 resolution;
uniform mat4 cameraProjectionMatrixInverse;
uniform float lumaPhi;
uniform float depthPhi;
uniform float normalPhi;
uniform float radius;
uniform int index;
#include <common>
#include <packing>
#ifndef SAMPLE_LUMINANCE
#define SAMPLE_LUMINANCE dot(vec3(0.2125, 0.7154, 0.0721), a)
#endif
#ifndef FRAGMENT_OUTPUT
#define FRAGMENT_OUTPUT vec4(denoised, 1.)
#endif
float getLuminance(const in vec3 a) {
return SAMPLE_LUMINANCE;
}
const vec3 poissonDisk[SAMPLES] = SAMPLE_VECTORS;
vec3 getViewPosition(const in vec2 screenPosition, const in float depth) {
vec4 clipSpacePosition = vec4(vec3(screenPosition, depth) * 2.0 - 1.0, 1.0);
vec4 viewSpacePosition = cameraProjectionMatrixInverse * clipSpacePosition;
return viewSpacePosition.xyz / viewSpacePosition.w;
}
float getDepth(const vec2 uv) {
#if DEPTH_VALUE_SOURCE == 1
return textureLod(tDepth, uv.xy, 0.0).a;
#else
return textureLod(tDepth, uv.xy, 0.0).r;
#endif
}
float fetchDepth(const ivec2 uv) {
#if DEPTH_VALUE_SOURCE == 1
return texelFetch(tDepth, uv.xy, 0).a;
#else
return texelFetch(tDepth, uv.xy, 0).r;
#endif
}
vec3 computeNormalFromDepth(const vec2 uv) {
vec2 size = vec2(textureSize(tDepth, 0));
ivec2 p = ivec2(uv * size);
float c0 = fetchDepth(p);
float l2 = fetchDepth(p - ivec2(2, 0));
float l1 = fetchDepth(p - ivec2(1, 0));
float r1 = fetchDepth(p + ivec2(1, 0));
float r2 = fetchDepth(p + ivec2(2, 0));
float b2 = fetchDepth(p - ivec2(0, 2));
float b1 = fetchDepth(p - ivec2(0, 1));
float t1 = fetchDepth(p + ivec2(0, 1));
float t2 = fetchDepth(p + ivec2(0, 2));
float dl = abs((2.0 * l1 - l2) - c0);
float dr = abs((2.0 * r1 - r2) - c0);
float db = abs((2.0 * b1 - b2) - c0);
float dt = abs((2.0 * t1 - t2) - c0);
vec3 ce = getViewPosition(uv, c0).xyz;
vec3 dpdx = (dl < dr) ? ce - getViewPosition((uv - vec2(1.0 / size.x, 0.0)), l1).xyz
: -ce + getViewPosition((uv + vec2(1.0 / size.x, 0.0)), r1).xyz;
vec3 dpdy = (db < dt) ? ce - getViewPosition((uv - vec2(0.0, 1.0 / size.y)), b1).xyz
: -ce + getViewPosition((uv + vec2(0.0, 1.0 / size.y)), t1).xyz;
return normalize(cross(dpdx, dpdy));
}
vec3 getViewNormal(const vec2 uv) {
#if NORMAL_VECTOR_TYPE == 2
return normalize(textureLod(tNormal, uv, 0.).rgb);
#elif NORMAL_VECTOR_TYPE == 1
return unpackRGBToNormal(textureLod(tNormal, uv, 0.).rgb);
#else
return computeNormalFromDepth(uv);
#endif
}
void denoiseSample(in vec3 center, in vec3 viewNormal, in vec3 viewPos, in vec2 sampleUv, inout vec3 denoised, inout float totalWeight) {
vec4 sampleTexel = textureLod(tDiffuse, sampleUv, 0.0);
float sampleDepth = getDepth(sampleUv);
vec3 sampleNormal = getViewNormal(sampleUv);
vec3 neighborColor = sampleTexel.rgb;
vec3 viewPosSample = getViewPosition(sampleUv, sampleDepth);
float normalDiff = dot(viewNormal, sampleNormal);
float normalSimilarity = pow(max(normalDiff, 0.), normalPhi);
float lumaDiff = abs(getLuminance(neighborColor) - getLuminance(center));
float lumaSimilarity = max(1.0 - lumaDiff / lumaPhi, 0.0);
float depthDiff = abs(dot(viewPos - viewPosSample, viewNormal));
float depthSimilarity = max(1. - depthDiff / depthPhi, 0.);
float w = lumaSimilarity * depthSimilarity * normalSimilarity;
denoised += w * neighborColor;
totalWeight += w;
}
void main() {
float depth = getDepth(vUv.xy);
vec3 viewNormal = getViewNormal(vUv);
if (depth == 1. || dot(viewNormal, viewNormal) == 0.) {
discard;
return;
}
vec4 texel = textureLod(tDiffuse, vUv, 0.0);
vec3 center = texel.rgb;
vec3 viewPos = getViewPosition(vUv, depth);
vec2 noiseResolution = vec2(textureSize(tNoise, 0));
vec2 noiseUv = vUv * resolution / noiseResolution;
vec4 noiseTexel = textureLod(tNoise, noiseUv, 0.0);
vec2 noiseVec = vec2(sin(noiseTexel[index % 4] * 2. * PI), cos(noiseTexel[index % 4] * 2. * PI));
mat2 rotationMatrix = mat2(noiseVec.x, -noiseVec.y, noiseVec.x, noiseVec.y);
float totalWeight = 1.0;
vec3 denoised = texel.rgb;
for (int i = 0; i < SAMPLES; i++) {
vec3 sampleDir = poissonDisk[i];
vec2 offset = rotationMatrix * (sampleDir.xy * (1. + sampleDir.z * (radius - 1.)) / resolution);
vec2 sampleUv = vUv + offset;
denoiseSample(center, viewNormal, viewPos, sampleUv, denoised, totalWeight);
}
if (totalWeight > 0.) {
denoised /= totalWeight;
}
gl_FragColor = FRAGMENT_OUTPUT;
}`
};
/**
 * Builds the GLSL source text for a `vec3[SAMPLES](...)` array initializer
 * containing the Poisson-disk sample points used by PoissonDenoiseShader.
 *
 * @param {number} samples - Number of sample points to emit.
 * @param {number} rings - Number of spiral rings for the angular distribution.
 * @param {number} radiusExponent - Exponent shaping the radial falloff.
 * @returns {string} GLSL initializer expression for a vec3 array of length SAMPLES.
 */
function generatePdSamplePointInitializer( samples, rings, radiusExponent ) {
const points = generateDenoiseSamples( samples, rings, radiusExponent );
let code = 'vec3[SAMPLES](';
points.forEach( ( point, i ) => {
// comma between entries; the closing paren follows the final entry
code += `vec3(${point.x}, ${point.y}, ${point.z})${ ( i < samples - 1 ) ? ',' : ')' }`;
} );
return code;
}
/**
 * Generates spiral-distributed denoise sample points: the angle advances by
 * `numRings` full revolutions over the sequence while the normalized radius
 * grows as (i / (numSamples - 1)) ^ radiusExponent.
 *
 * @param {number} numSamples - Number of points to generate.
 * @param {number} numRings - Number of full angular revolutions across the sequence.
 * @param {number} radiusExponent - Exponent applied to the normalized radius.
 * @returns {Vector3[]} Points with (x, y) on the unit circle and z as the radius weight.
 */
function generateDenoiseSamples( numSamples, numRings, radiusExponent ) {
return Array.from( { length: numSamples }, ( _, i ) => {
const angle = 2 * Math.PI * numRings * i / numSamples;
const radius = Math.pow( i / ( numSamples - 1 ), radiusExponent );
return new Vector3( Math.cos( angle ), Math.sin( angle ), radius );
} );
}
export { generatePdSamplePointInitializer, PoissonDenoiseShader };

View File

@ -0,0 +1,54 @@
/**
* RGB Shift Shader
* Shifts red and blue channels from center in opposite directions
* Ported from https://web.archive.org/web/20090820185047/http://kriss.cx/tom/2009/05/rgb-shift/
* by Tom Butterworth / https://web.archive.org/web/20090810054752/http://kriss.cx/tom/
*
* amount: shift distance (1 is width of input)
* angle: shift angle in radians
*/
const RGBShiftShader = {
name: 'RGBShiftShader',
uniforms: {
'tDiffuse': { value: null },
'amount': { value: 0.005 },
'angle': { value: 0.0 }
},
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
uniform sampler2D tDiffuse;
uniform float amount;
uniform float angle;
varying vec2 vUv;
void main() {
vec2 offset = amount * vec2( cos(angle), sin(angle));
vec4 cr = texture2D(tDiffuse, vUv + offset);
vec4 cga = texture2D(tDiffuse, vUv);
vec4 cb = texture2D(tDiffuse, vUv - offset);
gl_FragColor = vec4(cr.r, cga.g, cb.b, cga.a);
}`
};
export { RGBShiftShader };

View File

@ -0,0 +1,179 @@
import {
Matrix4,
Vector2
} from 'three';
/**
* TODO
*/
const SAOShader = {
name: 'SAOShader',
defines: {
'NUM_SAMPLES': 7,
'NUM_RINGS': 4,
'DIFFUSE_TEXTURE': 0,
'PERSPECTIVE_CAMERA': 1
},
uniforms: {
'tDepth': { value: null },
'tDiffuse': { value: null },
'tNormal': { value: null },
'size': { value: new Vector2( 512, 512 ) },
'cameraNear': { value: 1 },
'cameraFar': { value: 100 },
'cameraProjectionMatrix': { value: new Matrix4() },
'cameraInverseProjectionMatrix': { value: new Matrix4() },
'scale': { value: 1.0 },
'intensity': { value: 0.1 },
'bias': { value: 0.5 },
'minResolution': { value: 0.0 },
'kernelRadius': { value: 100.0 },
'randomSeed': { value: 0.0 }
},
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
#include <common>
varying vec2 vUv;
#if DIFFUSE_TEXTURE == 1
uniform sampler2D tDiffuse;
#endif
uniform highp sampler2D tDepth;
uniform highp sampler2D tNormal;
uniform float cameraNear;
uniform float cameraFar;
uniform mat4 cameraProjectionMatrix;
uniform mat4 cameraInverseProjectionMatrix;
uniform float scale;
uniform float intensity;
uniform float bias;
uniform float kernelRadius;
uniform float minResolution;
uniform vec2 size;
uniform float randomSeed;
// RGBA depth
#include <packing>
vec4 getDefaultColor( const in vec2 screenPosition ) {
#if DIFFUSE_TEXTURE == 1
return texture2D( tDiffuse, vUv );
#else
return vec4( 1.0 );
#endif
}
float getDepth( const in vec2 screenPosition ) {
return texture2D( tDepth, screenPosition ).x;
}
float getViewZ( const in float depth ) {
#if PERSPECTIVE_CAMERA == 1
return perspectiveDepthToViewZ( depth, cameraNear, cameraFar );
#else
return orthographicDepthToViewZ( depth, cameraNear, cameraFar );
#endif
}
vec3 getViewPosition( const in vec2 screenPosition, const in float depth, const in float viewZ ) {
float clipW = cameraProjectionMatrix[2][3] * viewZ + cameraProjectionMatrix[3][3];
vec4 clipPosition = vec4( ( vec3( screenPosition, depth ) - 0.5 ) * 2.0, 1.0 );
clipPosition *= clipW; // unprojection.
return ( cameraInverseProjectionMatrix * clipPosition ).xyz;
}
vec3 getViewNormal( const in vec3 viewPosition, const in vec2 screenPosition ) {
return unpackRGBToNormal( texture2D( tNormal, screenPosition ).xyz );
}
float scaleDividedByCameraFar;
float minResolutionMultipliedByCameraFar;
float getOcclusion( const in vec3 centerViewPosition, const in vec3 centerViewNormal, const in vec3 sampleViewPosition ) {
vec3 viewDelta = sampleViewPosition - centerViewPosition;
float viewDistance = length( viewDelta );
float scaledScreenDistance = scaleDividedByCameraFar * viewDistance;
return max(0.0, (dot(centerViewNormal, viewDelta) - minResolutionMultipliedByCameraFar) / scaledScreenDistance - bias) / (1.0 + pow2( scaledScreenDistance ) );
}
// moving costly divides into consts
const float ANGLE_STEP = PI2 * float( NUM_RINGS ) / float( NUM_SAMPLES );
const float INV_NUM_SAMPLES = 1.0 / float( NUM_SAMPLES );
float getAmbientOcclusion( const in vec3 centerViewPosition ) {
// precompute some variables require in getOcclusion.
scaleDividedByCameraFar = scale / cameraFar;
minResolutionMultipliedByCameraFar = minResolution * cameraFar;
vec3 centerViewNormal = getViewNormal( centerViewPosition, vUv );
// jsfiddle that shows sample pattern: https://jsfiddle.net/a16ff1p7/
float angle = rand( vUv + randomSeed ) * PI2;
vec2 radius = vec2( kernelRadius * INV_NUM_SAMPLES ) / size;
vec2 radiusStep = radius;
float occlusionSum = 0.0;
float weightSum = 0.0;
for( int i = 0; i < NUM_SAMPLES; i ++ ) {
vec2 sampleUv = vUv + vec2( cos( angle ), sin( angle ) ) * radius;
radius += radiusStep;
angle += ANGLE_STEP;
float sampleDepth = getDepth( sampleUv );
if( sampleDepth >= ( 1.0 - EPSILON ) ) {
continue;
}
float sampleViewZ = getViewZ( sampleDepth );
vec3 sampleViewPosition = getViewPosition( sampleUv, sampleDepth, sampleViewZ );
occlusionSum += getOcclusion( centerViewPosition, centerViewNormal, sampleViewPosition );
weightSum += 1.0;
}
if( weightSum == 0.0 ) discard;
return occlusionSum * ( intensity / weightSum );
}
void main() {
float centerDepth = getDepth( vUv );
if( centerDepth >= ( 1.0 - EPSILON ) ) {
discard;
}
float centerViewZ = getViewZ( centerDepth );
vec3 viewPosition = getViewPosition( vUv, centerDepth, centerViewZ );
float ambientOcclusion = getAmbientOcclusion( viewPosition );
gl_FragColor = getDefaultColor( vUv );
gl_FragColor.xyz *= 1.0 - ambientOcclusion;
}`
};
export { SAOShader };

View File

@ -0,0 +1,466 @@
import {
Vector2
} from 'three';
/**
* WebGL port of Subpixel Morphological Antialiasing (SMAA) v2.8
* Preset: SMAA 1x Medium (with color edge detection)
* https://github.com/iryoku/smaa/releases/tag/v2.8
*/
// SMAA pass 1 of 3: color-based edge detection.
// Writes detected edges into the red (left edge) / green (top edge) channels
// of the output; pixels with no edge are discarded.
const SMAAEdgesShader = {
name: 'SMAAEdgesShader',
defines: {
// Minimum color delta for a discontinuity to be treated as an edge.
'SMAA_THRESHOLD': '0.1'
},
uniforms: {
'tDiffuse': { value: null }, // input color buffer
'resolution': { value: new Vector2( 1 / 1024, 1 / 512 ) } // texel size (1 / render target size)
},
// Precomputes neighbour-texel offsets in the vertex shader to avoid
// dependent texture reads in the fragment stage.
vertexShader: /* glsl */`
uniform vec2 resolution;
varying vec2 vUv;
varying vec4 vOffset[ 3 ];
void SMAAEdgeDetectionVS( vec2 texcoord ) {
vOffset[ 0 ] = texcoord.xyxy + resolution.xyxy * vec4( -1.0, 0.0, 0.0, 1.0 ); // WebGL port note: Changed sign in W component
vOffset[ 1 ] = texcoord.xyxy + resolution.xyxy * vec4( 1.0, 0.0, 0.0, -1.0 ); // WebGL port note: Changed sign in W component
vOffset[ 2 ] = texcoord.xyxy + resolution.xyxy * vec4( -2.0, 0.0, 0.0, 2.0 ); // WebGL port note: Changed sign in W component
}
void main() {
vUv = uv;
SMAAEdgeDetectionVS( vUv );
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
// Thresholded color deltas against the 4-neighbourhood, then local contrast
// adaptation (an edge survives only if its delta is significant relative to
// the strongest delta nearby).
fragmentShader: /* glsl */`
uniform sampler2D tDiffuse;
varying vec2 vUv;
varying vec4 vOffset[ 3 ];
vec4 SMAAColorEdgeDetectionPS( vec2 texcoord, vec4 offset[3], sampler2D colorTex ) {
vec2 threshold = vec2( SMAA_THRESHOLD, SMAA_THRESHOLD );
// Calculate color deltas:
vec4 delta;
vec3 C = texture2D( colorTex, texcoord ).rgb;
vec3 Cleft = texture2D( colorTex, offset[0].xy ).rgb;
vec3 t = abs( C - Cleft );
delta.x = max( max( t.r, t.g ), t.b );
vec3 Ctop = texture2D( colorTex, offset[0].zw ).rgb;
t = abs( C - Ctop );
delta.y = max( max( t.r, t.g ), t.b );
// We do the usual threshold:
vec2 edges = step( threshold, delta.xy );
// Then discard if there is no edge:
if ( dot( edges, vec2( 1.0, 1.0 ) ) == 0.0 )
discard;
// Calculate right and bottom deltas:
vec3 Cright = texture2D( colorTex, offset[1].xy ).rgb;
t = abs( C - Cright );
delta.z = max( max( t.r, t.g ), t.b );
vec3 Cbottom = texture2D( colorTex, offset[1].zw ).rgb;
t = abs( C - Cbottom );
delta.w = max( max( t.r, t.g ), t.b );
// Calculate the maximum delta in the direct neighborhood:
float maxDelta = max( max( max( delta.x, delta.y ), delta.z ), delta.w );
// Calculate left-left and top-top deltas:
vec3 Cleftleft = texture2D( colorTex, offset[2].xy ).rgb;
t = abs( C - Cleftleft );
delta.z = max( max( t.r, t.g ), t.b );
vec3 Ctoptop = texture2D( colorTex, offset[2].zw ).rgb;
t = abs( C - Ctoptop );
delta.w = max( max( t.r, t.g ), t.b );
// Calculate the final maximum delta:
maxDelta = max( max( maxDelta, delta.z ), delta.w );
// Local contrast adaptation in action:
edges.xy *= step( 0.5 * maxDelta, delta.xy );
return vec4( edges, 0.0, 0.0 );
}
void main() {
gl_FragColor = SMAAColorEdgeDetectionPS( vUv, vOffset, tDiffuse );
}`
};
// SMAA pass 2 of 3: blending weight calculation.
// Consumes the edges texture produced by SMAAEdgesShader, searches along each
// edge for its endpoints, and looks up precomputed area/search textures to
// produce per-pixel blending weights.
const SMAAWeightsShader = {
name: 'SMAAWeightsShader',
defines: {
// Maximum iterations for the edge-endpoint searches below.
'SMAA_MAX_SEARCH_STEPS': '8',
// Layout constants of the precomputed area texture (tArea).
'SMAA_AREATEX_MAX_DISTANCE': '16',
'SMAA_AREATEX_PIXEL_SIZE': '( 1.0 / vec2( 160.0, 560.0 ) )',
'SMAA_AREATEX_SUBTEX_SIZE': '( 1.0 / 7.0 )'
},
uniforms: {
'tDiffuse': { value: null }, // edges texture from the previous pass
'tArea': { value: null }, // precomputed area lookup texture
'tSearch': { value: null }, // precomputed search lookup texture
'resolution': { value: new Vector2( 1 / 1024, 1 / 512 ) } // texel size (1 / render target size)
},
vertexShader: /* glsl */`
uniform vec2 resolution;
varying vec2 vUv;
varying vec4 vOffset[ 3 ];
varying vec2 vPixcoord;
void SMAABlendingWeightCalculationVS( vec2 texcoord ) {
vPixcoord = texcoord / resolution;
// We will use these offsets for the searches later on (see @PSEUDO_GATHER4):
vOffset[ 0 ] = texcoord.xyxy + resolution.xyxy * vec4( -0.25, 0.125, 1.25, 0.125 ); // WebGL port note: Changed sign in Y and W components
vOffset[ 1 ] = texcoord.xyxy + resolution.xyxy * vec4( -0.125, 0.25, -0.125, -1.25 ); // WebGL port note: Changed sign in Y and W components
// And these for the searches, they indicate the ends of the loops:
vOffset[ 2 ] = vec4( vOffset[ 0 ].xz, vOffset[ 1 ].yw ) + vec4( -2.0, 2.0, -2.0, 2.0 ) * resolution.xxyy * float( SMAA_MAX_SEARCH_STEPS );
}
void main() {
vUv = uv;
SMAABlendingWeightCalculationVS( vUv );
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
#define SMAASampleLevelZeroOffset( tex, coord, offset ) texture2D( tex, coord + float( offset ) * resolution, 0.0 )
uniform sampler2D tDiffuse;
uniform sampler2D tArea;
uniform sampler2D tSearch;
uniform vec2 resolution;
varying vec2 vUv;
varying vec4 vOffset[3];
varying vec2 vPixcoord;
#if __VERSION__ == 100
vec2 round( vec2 x ) {
return sign( x ) * floor( abs( x ) + 0.5 );
}
#endif
float SMAASearchLength( sampler2D searchTex, vec2 e, float bias, float scale ) {
// Not required if searchTex accesses are set to point:
// float2 SEARCH_TEX_PIXEL_SIZE = 1.0 / float2(66.0, 33.0);
// e = float2(bias, 0.0) + 0.5 * SEARCH_TEX_PIXEL_SIZE +
// e * float2(scale, 1.0) * float2(64.0, 32.0) * SEARCH_TEX_PIXEL_SIZE;
e.r = bias + e.r * scale;
return 255.0 * texture2D( searchTex, e, 0.0 ).r;
}
float SMAASearchXLeft( sampler2D edgesTex, sampler2D searchTex, vec2 texcoord, float end ) {
/**
* @PSEUDO_GATHER4
* This texcoord has been offset by (-0.25, -0.125) in the vertex shader to
* sample between edge, thus fetching four edges in a row.
* Sampling with different offsets in each direction allows to disambiguate
* which edges are active from the four fetched ones.
*/
vec2 e = vec2( 0.0, 1.0 );
for ( int i = 0; i < SMAA_MAX_SEARCH_STEPS; i ++ ) { // WebGL port note: Changed while to for
e = texture2D( edgesTex, texcoord, 0.0 ).rg;
texcoord -= vec2( 2.0, 0.0 ) * resolution;
if ( ! ( texcoord.x > end && e.g > 0.8281 && e.r == 0.0 ) ) break;
}
// We correct the previous (-0.25, -0.125) offset we applied:
texcoord.x += 0.25 * resolution.x;
// The searches are bias by 1, so adjust the coords accordingly:
texcoord.x += resolution.x;
// Disambiguate the length added by the last step:
texcoord.x += 2.0 * resolution.x; // Undo last step
texcoord.x -= resolution.x * SMAASearchLength(searchTex, e, 0.0, 0.5);
return texcoord.x;
}
float SMAASearchXRight( sampler2D edgesTex, sampler2D searchTex, vec2 texcoord, float end ) {
vec2 e = vec2( 0.0, 1.0 );
for ( int i = 0; i < SMAA_MAX_SEARCH_STEPS; i ++ ) { // WebGL port note: Changed while to for
e = texture2D( edgesTex, texcoord, 0.0 ).rg;
texcoord += vec2( 2.0, 0.0 ) * resolution;
if ( ! ( texcoord.x < end && e.g > 0.8281 && e.r == 0.0 ) ) break;
}
texcoord.x -= 0.25 * resolution.x;
texcoord.x -= resolution.x;
texcoord.x -= 2.0 * resolution.x;
texcoord.x += resolution.x * SMAASearchLength( searchTex, e, 0.5, 0.5 );
return texcoord.x;
}
float SMAASearchYUp( sampler2D edgesTex, sampler2D searchTex, vec2 texcoord, float end ) {
vec2 e = vec2( 1.0, 0.0 );
for ( int i = 0; i < SMAA_MAX_SEARCH_STEPS; i ++ ) { // WebGL port note: Changed while to for
e = texture2D( edgesTex, texcoord, 0.0 ).rg;
texcoord += vec2( 0.0, 2.0 ) * resolution; // WebGL port note: Changed sign
if ( ! ( texcoord.y > end && e.r > 0.8281 && e.g == 0.0 ) ) break;
}
texcoord.y -= 0.25 * resolution.y; // WebGL port note: Changed sign
texcoord.y -= resolution.y; // WebGL port note: Changed sign
texcoord.y -= 2.0 * resolution.y; // WebGL port note: Changed sign
texcoord.y += resolution.y * SMAASearchLength( searchTex, e.gr, 0.0, 0.5 ); // WebGL port note: Changed sign
return texcoord.y;
}
float SMAASearchYDown( sampler2D edgesTex, sampler2D searchTex, vec2 texcoord, float end ) {
vec2 e = vec2( 1.0, 0.0 );
for ( int i = 0; i < SMAA_MAX_SEARCH_STEPS; i ++ ) { // WebGL port note: Changed while to for
e = texture2D( edgesTex, texcoord, 0.0 ).rg;
texcoord -= vec2( 0.0, 2.0 ) * resolution; // WebGL port note: Changed sign
if ( ! ( texcoord.y < end && e.r > 0.8281 && e.g == 0.0 ) ) break;
}
texcoord.y += 0.25 * resolution.y; // WebGL port note: Changed sign
texcoord.y += resolution.y; // WebGL port note: Changed sign
texcoord.y += 2.0 * resolution.y; // WebGL port note: Changed sign
texcoord.y -= resolution.y * SMAASearchLength( searchTex, e.gr, 0.5, 0.5 ); // WebGL port note: Changed sign
return texcoord.y;
}
vec2 SMAAArea( sampler2D areaTex, vec2 dist, float e1, float e2, float offset ) {
// Rounding prevents precision errors of bilinear filtering:
vec2 texcoord = float( SMAA_AREATEX_MAX_DISTANCE ) * round( 4.0 * vec2( e1, e2 ) ) + dist;
// We do a scale and bias for mapping to texel space:
texcoord = SMAA_AREATEX_PIXEL_SIZE * texcoord + ( 0.5 * SMAA_AREATEX_PIXEL_SIZE );
// Move to proper place, according to the subpixel offset:
texcoord.y += SMAA_AREATEX_SUBTEX_SIZE * offset;
return texture2D( areaTex, texcoord, 0.0 ).rg;
}
vec4 SMAABlendingWeightCalculationPS( vec2 texcoord, vec2 pixcoord, vec4 offset[ 3 ], sampler2D edgesTex, sampler2D areaTex, sampler2D searchTex, ivec4 subsampleIndices ) {
vec4 weights = vec4( 0.0, 0.0, 0.0, 0.0 );
vec2 e = texture2D( edgesTex, texcoord ).rg;
if ( e.g > 0.0 ) { // Edge at north
vec2 d;
// Find the distance to the left:
vec2 coords;
coords.x = SMAASearchXLeft( edgesTex, searchTex, offset[ 0 ].xy, offset[ 2 ].x );
coords.y = offset[ 1 ].y; // offset[1].y = texcoord.y - 0.25 * resolution.y (@CROSSING_OFFSET)
d.x = coords.x;
// Now fetch the left crossing edges, two at a time using bilinear
// filtering. Sampling at -0.25 (see @CROSSING_OFFSET) enables to
// discern what value each edge has:
float e1 = texture2D( edgesTex, coords, 0.0 ).r;
// Find the distance to the right:
coords.x = SMAASearchXRight( edgesTex, searchTex, offset[ 0 ].zw, offset[ 2 ].y );
d.y = coords.x;
// We want the distances to be in pixel units (doing this here allow to
// better interleave arithmetic and memory accesses):
d = d / resolution.x - pixcoord.x;
// SMAAArea below needs a sqrt, as the areas texture is compressed
// quadratically:
vec2 sqrt_d = sqrt( abs( d ) );
// Fetch the right crossing edges:
coords.y -= 1.0 * resolution.y; // WebGL port note: Added
float e2 = SMAASampleLevelZeroOffset( edgesTex, coords, ivec2( 1, 0 ) ).r;
// Ok, we know how this pattern looks like, now it is time for getting
// the actual area:
weights.rg = SMAAArea( areaTex, sqrt_d, e1, e2, float( subsampleIndices.y ) );
}
if ( e.r > 0.0 ) { // Edge at west
vec2 d;
// Find the distance to the top:
vec2 coords;
coords.y = SMAASearchYUp( edgesTex, searchTex, offset[ 1 ].xy, offset[ 2 ].z );
coords.x = offset[ 0 ].x; // offset[1].x = texcoord.x - 0.25 * resolution.x;
d.x = coords.y;
// Fetch the top crossing edges:
float e1 = texture2D( edgesTex, coords, 0.0 ).g;
// Find the distance to the bottom:
coords.y = SMAASearchYDown( edgesTex, searchTex, offset[ 1 ].zw, offset[ 2 ].w );
d.y = coords.y;
// We want the distances to be in pixel units:
d = d / resolution.y - pixcoord.y;
// SMAAArea below needs a sqrt, as the areas texture is compressed
// quadratically:
vec2 sqrt_d = sqrt( abs( d ) );
// Fetch the bottom crossing edges:
coords.y -= 1.0 * resolution.y; // WebGL port note: Added
float e2 = SMAASampleLevelZeroOffset( edgesTex, coords, ivec2( 0, 1 ) ).g;
// Get the area for this direction:
weights.ba = SMAAArea( areaTex, sqrt_d, e1, e2, float( subsampleIndices.x ) );
}
return weights;
}
void main() {
gl_FragColor = SMAABlendingWeightCalculationPS( vUv, vPixcoord, vOffset, tDiffuse, tArea, tSearch, ivec4( 0.0 ) );
}`
};
// SMAA pass 3 of 3: neighborhood blending.
// Uses the blending weights from SMAAWeightsShader (bound to tDiffuse) to blend
// each pixel of the original color buffer (tColor) with its neighbour along the
// strongest crossing edge.
const SMAABlendShader = {
name: 'SMAABlendShader',
uniforms: {
'tDiffuse': { value: null }, // blending weights texture from the previous pass
'tColor': { value: null }, // original (aliased) color buffer
'resolution': { value: new Vector2( 1 / 1024, 1 / 512 ) } // texel size (1 / render target size)
},
vertexShader: /* glsl */`
uniform vec2 resolution;
varying vec2 vUv;
varying vec4 vOffset[ 2 ];
void SMAANeighborhoodBlendingVS( vec2 texcoord ) {
vOffset[ 0 ] = texcoord.xyxy + resolution.xyxy * vec4( -1.0, 0.0, 0.0, 1.0 ); // WebGL port note: Changed sign in W component
vOffset[ 1 ] = texcoord.xyxy + resolution.xyxy * vec4( 1.0, 0.0, 0.0, -1.0 ); // WebGL port note: Changed sign in W component
}
void main() {
vUv = uv;
SMAANeighborhoodBlendingVS( vUv );
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
uniform sampler2D tDiffuse;
uniform sampler2D tColor;
uniform vec2 resolution;
varying vec2 vUv;
varying vec4 vOffset[ 2 ];
vec4 SMAANeighborhoodBlendingPS( vec2 texcoord, vec4 offset[ 2 ], sampler2D colorTex, sampler2D blendTex ) {
// Fetch the blending weights for current pixel:
vec4 a;
a.xz = texture2D( blendTex, texcoord ).xz;
a.y = texture2D( blendTex, offset[ 1 ].zw ).g;
a.w = texture2D( blendTex, offset[ 1 ].xy ).a;
// Is there any blending weight with a value greater than 0.0?
if ( dot(a, vec4( 1.0, 1.0, 1.0, 1.0 )) < 1e-5 ) {
return texture2D( colorTex, texcoord, 0.0 );
} else {
// Up to 4 lines can be crossing a pixel (one through each edge). We
// favor blending by choosing the line with the maximum weight for each
// direction:
vec2 offset;
offset.x = a.a > a.b ? a.a : -a.b; // left vs. right
offset.y = a.g > a.r ? -a.g : a.r; // top vs. bottom // WebGL port note: Changed signs
// Then we go in the direction that has the maximum weight:
if ( abs( offset.x ) > abs( offset.y )) { // horizontal vs. vertical
offset.y = 0.0;
} else {
offset.x = 0.0;
}
// Fetch the opposite color and lerp by hand:
vec4 C = texture2D( colorTex, texcoord, 0.0 );
texcoord += sign( offset ) * resolution;
vec4 Cop = texture2D( colorTex, texcoord, 0.0 );
float s = abs( offset.x ) > abs( offset.y ) ? abs( offset.x ) : abs( offset.y );
// WebGL port note: Added gamma correction
C.xyz = pow(C.xyz, vec3(2.2));
Cop.xyz = pow(Cop.xyz, vec3(2.2));
vec4 mixed = mix(C, Cop, s);
mixed.xyz = pow(mixed.xyz, vec3(1.0 / 2.2));
return mixed;
}
}
void main() {
gl_FragColor = SMAANeighborhoodBlendingPS( vUv, vOffset, tColor, tDiffuse );
}`
};
export { SMAAEdgesShader, SMAAWeightsShader, SMAABlendShader };

View File

@ -0,0 +1,300 @@
import {
Matrix4,
Vector2
} from 'three';
/**
* References:
* http://john-chapman-graphics.blogspot.com/2013/01/ssao-tutorial.html
* https://learnopengl.com/Advanced-Lighting/SSAO
* https://github.com/McNopper/OpenGL/blob/master/Example28/shader/ssao.frag.glsl
*/
// Screen-space ambient occlusion (hemisphere-kernel variant).
// Reconstructs view-space position/normal from the depth and normal buffers,
// reorients a sample kernel around the surface normal, and counts occluded
// samples to darken the output.
const SSAOShader = {
name: 'SSAOShader',
defines: {
'PERSPECTIVE_CAMERA': 1, // 1 = perspective depth reconstruction, else orthographic
'KERNEL_SIZE': 32 // number of entries expected in the 'kernel' uniform
},
uniforms: {
'tNormal': { value: null }, // packed view-space normals
'tDepth': { value: null }, // scene depth buffer
'tNoise': { value: null }, // small tiling noise texture (tiled every 4 pixels, see noiseScale)
'kernel': { value: null }, // vec3[KERNEL_SIZE] sample kernel, set by the caller
'cameraNear': { value: null },
'cameraFar': { value: null },
'resolution': { value: new Vector2() }, // render target size in pixels
'cameraProjectionMatrix': { value: new Matrix4() },
'cameraInverseProjectionMatrix': { value: new Matrix4() },
'kernelRadius': { value: 8 }, // world/view-space radius the kernel is scaled by
'minDistance': { value: 0.005 }, // see GLSL comment: filters near-equal depths
'maxDistance': { value: 0.05 }, // see GLSL comment: ignores far-away occluders
},
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
uniform highp sampler2D tNormal;
uniform highp sampler2D tDepth;
uniform sampler2D tNoise;
uniform vec3 kernel[ KERNEL_SIZE ];
uniform vec2 resolution;
uniform float cameraNear;
uniform float cameraFar;
uniform mat4 cameraProjectionMatrix;
uniform mat4 cameraInverseProjectionMatrix;
uniform float kernelRadius;
uniform float minDistance; // avoid artifacts caused by neighbour fragments with minimal depth difference
uniform float maxDistance; // avoid the influence of fragments which are too far away
varying vec2 vUv;
#include <packing>
float getDepth( const in vec2 screenPosition ) {
return texture2D( tDepth, screenPosition ).x;
}
float getLinearDepth( const in vec2 screenPosition ) {
#if PERSPECTIVE_CAMERA == 1
float fragCoordZ = texture2D( tDepth, screenPosition ).x;
float viewZ = perspectiveDepthToViewZ( fragCoordZ, cameraNear, cameraFar );
return viewZToOrthographicDepth( viewZ, cameraNear, cameraFar );
#else
return texture2D( tDepth, screenPosition ).x;
#endif
}
float getViewZ( const in float depth ) {
#if PERSPECTIVE_CAMERA == 1
return perspectiveDepthToViewZ( depth, cameraNear, cameraFar );
#else
return orthographicDepthToViewZ( depth, cameraNear, cameraFar );
#endif
}
vec3 getViewPosition( const in vec2 screenPosition, const in float depth, const in float viewZ ) {
float clipW = cameraProjectionMatrix[2][3] * viewZ + cameraProjectionMatrix[3][3];
vec4 clipPosition = vec4( ( vec3( screenPosition, depth ) - 0.5 ) * 2.0, 1.0 );
clipPosition *= clipW; // unprojection.
return ( cameraInverseProjectionMatrix * clipPosition ).xyz;
}
vec3 getViewNormal( const in vec2 screenPosition ) {
return unpackRGBToNormal( texture2D( tNormal, screenPosition ).xyz );
}
void main() {
float depth = getDepth( vUv );
if ( depth == 1.0 ) {
gl_FragColor = vec4( 1.0 ); // don't influence background
} else {
float viewZ = getViewZ( depth );
vec3 viewPosition = getViewPosition( vUv, depth, viewZ );
vec3 viewNormal = getViewNormal( vUv );
vec2 noiseScale = vec2( resolution.x / 4.0, resolution.y / 4.0 );
vec3 random = vec3( texture2D( tNoise, vUv * noiseScale ).r );
// compute matrix used to reorient a kernel vector
vec3 tangent = normalize( random - viewNormal * dot( random, viewNormal ) );
vec3 bitangent = cross( viewNormal, tangent );
mat3 kernelMatrix = mat3( tangent, bitangent, viewNormal );
float occlusion = 0.0;
for ( int i = 0; i < KERNEL_SIZE; i ++ ) {
vec3 sampleVector = kernelMatrix * kernel[ i ]; // reorient sample vector in view space
vec3 samplePoint = viewPosition + ( sampleVector * kernelRadius ); // calculate sample point
vec4 samplePointNDC = cameraProjectionMatrix * vec4( samplePoint, 1.0 ); // project point and calculate NDC
samplePointNDC /= samplePointNDC.w;
vec2 samplePointUv = samplePointNDC.xy * 0.5 + 0.5; // compute uv coordinates
float realDepth = getLinearDepth( samplePointUv ); // get linear depth from depth texture
float sampleDepth = viewZToOrthographicDepth( samplePoint.z, cameraNear, cameraFar ); // compute linear depth of the sample view Z value
float delta = sampleDepth - realDepth;
if ( delta > minDistance && delta < maxDistance ) { // if fragment is before sample point, increase occlusion
occlusion += 1.0;
}
}
occlusion = clamp( occlusion / float( KERNEL_SIZE ), 0.0, 1.0 );
gl_FragColor = vec4( vec3( 1.0 - occlusion ), 1.0 );
}
}`
};
// Debug/helper shader for the SSAO pass: visualizes the linearized depth
// buffer as a grayscale image (near = bright, far = dark).
const SSAODepthShader = {
name: 'SSAODepthShader',
defines: {
// 1 = linearize perspective depth; otherwise the raw depth value is used.
'PERSPECTIVE_CAMERA': 1
},
uniforms: {
'tDepth': { value: null }, // scene depth buffer
'cameraNear': { value: null },
'cameraFar': { value: null },
},
vertexShader:
`varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader:
`uniform sampler2D tDepth;
uniform float cameraNear;
uniform float cameraFar;
varying vec2 vUv;
#include <packing>
float getLinearDepth( const in vec2 screenPosition ) {
#if PERSPECTIVE_CAMERA == 1
float fragCoordZ = texture2D( tDepth, screenPosition ).x;
float viewZ = perspectiveDepthToViewZ( fragCoordZ, cameraNear, cameraFar );
return viewZToOrthographicDepth( viewZ, cameraNear, cameraFar );
#else
return texture2D( tDepth, screenPosition ).x;
#endif
}
void main() {
float depth = getLinearDepth( vUv );
gl_FragColor = vec4( vec3( 1.0 - depth ), 1.0 );
}`
};
// Post-blur for the SSAO pass: a 5x5 box blur over the red channel of the
// occlusion texture, used to smooth out the noise introduced by the
// randomized sample kernel.
const SSAOBlurShader = {
name: 'SSAOBlurShader',
uniforms: {
'tDiffuse': { value: null }, // occlusion texture produced by SSAOShader
'resolution': { value: new Vector2() } // render target size in pixels
},
vertexShader:
`varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader:
`uniform sampler2D tDiffuse;
uniform vec2 resolution;
varying vec2 vUv;
void main() {
vec2 texelSize = ( 1.0 / resolution );
float result = 0.0;
for ( int i = - 2; i <= 2; i ++ ) {
for ( int j = - 2; j <= 2; j ++ ) {
vec2 offset = ( vec2( float( i ), float( j ) ) ) * texelSize;
result += texture2D( tDiffuse, vUv + offset ).r;
}
}
gl_FragColor = vec4( vec3( result / ( 5.0 * 5.0 ) ), 1.0 );
}`
};
export { SSAOShader, SSAODepthShader, SSAOBlurShader };

View File

@ -0,0 +1,370 @@
import {
Matrix4,
Vector2
} from 'three';
/**
* References:
* https://lettier.github.io/3d-game-shaders-for-beginners/screen-space-reflection.html
*/
// Screen-space reflections: marches a ray through screen space along the
// reflected view direction and, on a depth-buffer hit, outputs the reflected
// color with an opacity shaped by distance attenuation and a Fresnel-like term.
const SSRShader = {
name: 'SSRShader',
defines: {
MAX_STEP: 0, // upper bound of the ray-march loop; set by the caller (0 here disables marching)
PERSPECTIVE_CAMERA: true,
DISTANCE_ATTENUATION: true, // fade reflections with distance from the reflector
FRESNEL: true, // modulate opacity by a Fresnel-like grazing-angle term
INFINITE_THICK: false, // treat every depth sample as infinitely thick (always hit)
SELECTIVE: false, // when true, only pixels with non-zero tMetalness reflect
},
uniforms: {
'tDiffuse': { value: null }, // scene color buffer
'tNormal': { value: null }, // packed view-space normals
'tMetalness': { value: null }, // metalness buffer, only read when SELECTIVE is defined
'tDepth': { value: null }, // scene depth buffer
'cameraNear': { value: null },
'cameraFar': { value: null },
'resolution': { value: new Vector2() }, // render target size in pixels
'cameraProjectionMatrix': { value: new Matrix4() },
'cameraInverseProjectionMatrix': { value: new Matrix4() },
'opacity': { value: .5 }, // base reflection opacity
'maxDistance': { value: 180 }, // maximum reflection distance in view units
'cameraRange': { value: 0 },
'thickness': { value: .018 } // assumed surface thickness for hit detection
},
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}
`,
fragmentShader: /* glsl */`
// precision highp float;
precision highp sampler2D;
varying vec2 vUv;
uniform sampler2D tDepth;
uniform sampler2D tNormal;
uniform sampler2D tMetalness;
uniform sampler2D tDiffuse;
uniform float cameraRange;
uniform vec2 resolution;
uniform float opacity;
uniform float cameraNear;
uniform float cameraFar;
uniform float maxDistance;
uniform float thickness;
uniform mat4 cameraProjectionMatrix;
uniform mat4 cameraInverseProjectionMatrix;
#include <packing>
float pointToLineDistance(vec3 x0, vec3 x1, vec3 x2) {
//x0: point, x1: linePointA, x2: linePointB
//https://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html
return length(cross(x0-x1,x0-x2))/length(x2-x1);
}
float pointPlaneDistance(vec3 point,vec3 planePoint,vec3 planeNormal){
// https://mathworld.wolfram.com/Point-PlaneDistance.html
//// https://en.wikipedia.org/wiki/Plane_(geometry)
//// http://paulbourke.net/geometry/pointlineplane/
float a=planeNormal.x,b=planeNormal.y,c=planeNormal.z;
float x0=point.x,y0=point.y,z0=point.z;
float x=planePoint.x,y=planePoint.y,z=planePoint.z;
float d=-(a*x+b*y+c*z);
float distance=(a*x0+b*y0+c*z0+d)/sqrt(a*a+b*b+c*c);
return distance;
}
float getDepth( const in vec2 uv ) {
return texture2D( tDepth, uv ).x;
}
float getViewZ( const in float depth ) {
#ifdef PERSPECTIVE_CAMERA
return perspectiveDepthToViewZ( depth, cameraNear, cameraFar );
#else
return orthographicDepthToViewZ( depth, cameraNear, cameraFar );
#endif
}
vec3 getViewPosition( const in vec2 uv, const in float depth/*clip space*/, const in float clipW ) {
vec4 clipPosition = vec4( ( vec3( uv, depth ) - 0.5 ) * 2.0, 1.0 );//ndc
clipPosition *= clipW; //clip
return ( cameraInverseProjectionMatrix * clipPosition ).xyz;//view
}
vec3 getViewNormal( const in vec2 uv ) {
return unpackRGBToNormal( texture2D( tNormal, uv ).xyz );
}
vec2 viewPositionToXY(vec3 viewPosition){
vec2 xy;
vec4 clip=cameraProjectionMatrix*vec4(viewPosition,1);
xy=clip.xy;//clip
float clipW=clip.w;
xy/=clipW;//NDC
xy=(xy+1.)/2.;//uv
xy*=resolution;//screen
return xy;
}
void main(){
#ifdef SELECTIVE
float metalness=texture2D(tMetalness,vUv).r;
if(metalness==0.) return;
#endif
float depth = getDepth( vUv );
float viewZ = getViewZ( depth );
if(-viewZ>=cameraFar) return;
float clipW = cameraProjectionMatrix[2][3] * viewZ+cameraProjectionMatrix[3][3];
vec3 viewPosition=getViewPosition( vUv, depth, clipW );
vec2 d0=gl_FragCoord.xy;
vec2 d1;
vec3 viewNormal=getViewNormal( vUv );
#ifdef PERSPECTIVE_CAMERA
vec3 viewIncidentDir=normalize(viewPosition);
vec3 viewReflectDir=reflect(viewIncidentDir,viewNormal);
#else
vec3 viewIncidentDir=vec3(0,0,-1);
vec3 viewReflectDir=reflect(viewIncidentDir,viewNormal);
#endif
float maxReflectRayLen=maxDistance/dot(-viewIncidentDir,viewNormal);
// dot(a,b)==length(a)*length(b)*cos(theta) // https://www.mathsisfun.com/algebra/vectors-dot-product.html
// if(a.isNormalized&&b.isNormalized) dot(a,b)==cos(theta)
// maxDistance/maxReflectRayLen=cos(theta)
// maxDistance/maxReflectRayLen==dot(a,b)
// maxReflectRayLen==maxDistance/dot(a,b)
vec3 d1viewPosition=viewPosition+viewReflectDir*maxReflectRayLen;
#ifdef PERSPECTIVE_CAMERA
if(d1viewPosition.z>-cameraNear){
//https://tutorial.math.lamar.edu/Classes/CalcIII/EqnsOfLines.aspx
float t=(-cameraNear-viewPosition.z)/viewReflectDir.z;
d1viewPosition=viewPosition+viewReflectDir*t;
}
#endif
d1=viewPositionToXY(d1viewPosition);
float totalLen=length(d1-d0);
float xLen=d1.x-d0.x;
float yLen=d1.y-d0.y;
float totalStep=max(abs(xLen),abs(yLen));
float xSpan=xLen/totalStep;
float ySpan=yLen/totalStep;
for(float i=0.;i<float(MAX_STEP);i++){
if(i>=totalStep) break;
vec2 xy=vec2(d0.x+i*xSpan,d0.y+i*ySpan);
if(xy.x<0.||xy.x>resolution.x||xy.y<0.||xy.y>resolution.y) break;
float s=length(xy-d0)/totalLen;
vec2 uv=xy/resolution;
float d = getDepth(uv);
float vZ = getViewZ( d );
if(-vZ>=cameraFar) continue;
float cW = cameraProjectionMatrix[2][3] * vZ+cameraProjectionMatrix[3][3];
vec3 vP=getViewPosition( uv, d, cW );
#ifdef PERSPECTIVE_CAMERA
// https://comp.nus.edu.sg/~lowkl/publications/lowk_persp_interp_techrep.pdf
float recipVPZ=1./viewPosition.z;
float viewReflectRayZ=1./(recipVPZ+s*(1./d1viewPosition.z-recipVPZ));
#else
float viewReflectRayZ=viewPosition.z+s*(d1viewPosition.z-viewPosition.z);
#endif
// if(viewReflectRayZ>vZ) continue; // will cause "npm run make-screenshot webgl_postprocessing_ssr" high probability hang.
// https://github.com/mrdoob/three.js/pull/21539#issuecomment-821061164
if(viewReflectRayZ<=vZ){
bool hit;
#ifdef INFINITE_THICK
hit=true;
#else
float away=pointToLineDistance(vP,viewPosition,d1viewPosition);
float minThickness;
vec2 xyNeighbor=xy;
xyNeighbor.x+=1.;
vec2 uvNeighbor=xyNeighbor/resolution;
vec3 vPNeighbor=getViewPosition(uvNeighbor,d,cW);
minThickness=vPNeighbor.x-vP.x;
minThickness*=3.;
float tk=max(minThickness,thickness);
hit=away<=tk;
#endif
if(hit){
vec3 vN=getViewNormal( uv );
if(dot(viewReflectDir,vN)>=0.) continue;
float distance=pointPlaneDistance(vP,viewPosition,viewNormal);
if(distance>maxDistance) break;
float op=opacity;
#ifdef DISTANCE_ATTENUATION
float ratio=1.-(distance/maxDistance);
float attenuation=ratio*ratio;
op=opacity*attenuation;
#endif
#ifdef FRESNEL
float fresnelCoe=(dot(viewIncidentDir,viewReflectDir)+1.)/2.;
op*=fresnelCoe;
#endif
vec4 reflectColor=texture2D(tDiffuse,uv);
gl_FragColor.xyz=reflectColor.xyz;
gl_FragColor.a=op;
break;
}
}
}
}
`
};
// Debug/helper shader for the SSR pass: visualizes the linearized depth buffer
// as a grayscale image (near = bright, far = dark).
const SSRDepthShader = {
name: 'SSRDepthShader',
defines: {
// 1 = linearize perspective depth; otherwise the raw depth value is used.
'PERSPECTIVE_CAMERA': 1
},
uniforms: {
'tDepth': { value: null }, // scene depth buffer
'cameraNear': { value: null },
'cameraFar': { value: null },
},
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}
`,
fragmentShader: /* glsl */`
uniform sampler2D tDepth;
uniform float cameraNear;
uniform float cameraFar;
varying vec2 vUv;
#include <packing>
float getLinearDepth( const in vec2 uv ) {
#if PERSPECTIVE_CAMERA == 1
float fragCoordZ = texture2D( tDepth, uv ).x;
float viewZ = perspectiveDepthToViewZ( fragCoordZ, cameraNear, cameraFar );
return viewZToOrthographicDepth( viewZ, cameraNear, cameraFar );
#else
return texture2D( tDepth, uv ).x;
#endif
}
void main() {
float depth = getLinearDepth( vUv );
float d = 1.0 - depth;
// d=(d-.999)*1000.;
gl_FragColor = vec4( vec3( d ), 1.0 );
}
`
};
// Post-blur for the SSR pass: a 5-tap cross-shaped blur whose color average is
// weighted by each tap's alpha (reflection opacity), so transparent neighbours
// do not bleed color into the result.
const SSRBlurShader = {
name: 'SSRBlurShader',
uniforms: {
'tDiffuse': { value: null }, // reflection buffer produced by SSRShader
'resolution': { value: new Vector2() }, // render target size in pixels
'opacity': { value: .5 },
},
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}
`,
fragmentShader: /* glsl */`
uniform sampler2D tDiffuse;
uniform vec2 resolution;
varying vec2 vUv;
void main() {
//reverse engineering from PhotoShop blur filter, then change coefficient
vec2 texelSize = ( 1.0 / resolution );
vec4 c=texture2D(tDiffuse,vUv);
vec2 offset;
offset=(vec2(-1,0))*texelSize;
vec4 cl=texture2D(tDiffuse,vUv+offset);
offset=(vec2(1,0))*texelSize;
vec4 cr=texture2D(tDiffuse,vUv+offset);
offset=(vec2(0,-1))*texelSize;
vec4 cb=texture2D(tDiffuse,vUv+offset);
offset=(vec2(0,1))*texelSize;
vec4 ct=texture2D(tDiffuse,vUv+offset);
// float coeCenter=.5;
// float coeSide=.125;
float coeCenter=.2;
float coeSide=.2;
float a=c.a*coeCenter+cl.a*coeSide+cr.a*coeSide+cb.a*coeSide+ct.a*coeSide;
vec3 rgb=(c.rgb*c.a*coeCenter+cl.rgb*cl.a*coeSide+cr.rgb*cr.a*coeSide+cb.rgb*cb.a*coeSide+ct.rgb*ct.a*coeSide)/a;
gl_FragColor=vec4(rgb,a);
}
`
};
export { SSRShader, SSRDepthShader, SSRBlurShader };

View File

@ -0,0 +1,52 @@
/**
* Sepia tone shader
* based on glfx.js sepia shader
* https://github.com/evanw/glfx.js
*/
// Sepia tone full-screen filter. 'amount' blends between the identity
// transform (0.0) and the full sepia color matrix (1.0); output channels are
// clamped to 1.0 and the input alpha is preserved.
const SepiaShader = {
name: 'SepiaShader',
uniforms: {
'tDiffuse': { value: null }, // input color buffer
'amount': { value: 1.0 } // effect strength, 0.0 (off) .. 1.0 (full sepia)
},
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
uniform float amount;
uniform sampler2D tDiffuse;
varying vec2 vUv;
void main() {
vec4 color = texture2D( tDiffuse, vUv );
vec3 c = color.rgb;
color.r = dot( c, vec3( 1.0 - 0.607 * amount, 0.769 * amount, 0.189 * amount ) );
color.g = dot( c, vec3( 0.349 * amount, 1.0 - 0.314 * amount, 0.168 * amount ) );
color.b = dot( c, vec3( 0.272 * amount, 0.534 * amount, 1.0 - 0.869 * amount ) );
gl_FragColor = vec4( min( vec3( 1.0 ), color.rgb ), color.a );
}`
};
export { SepiaShader };

View File

@ -0,0 +1,92 @@
import {
Vector2
} from 'three';
/**
* Sobel Edge Detection (see https://youtu.be/uihBwtPIBxM)
*
* As mentioned in the video the Sobel operator expects a grayscale image as input.
*
*/
// Sobel edge-detection filter. Samples the red channel of the 3x3
// neighbourhood (so the input is expected to be grayscale, see the header
// comment above), convolves with the Gx/Gy kernels, and outputs the gradient
// magnitude as a grayscale color.
const SobelOperatorShader = {
name: 'SobelOperatorShader',
uniforms: {
'tDiffuse': { value: null }, // input (grayscale) color buffer
'resolution': { value: new Vector2() } // render target size in pixels
},
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
uniform sampler2D tDiffuse;
uniform vec2 resolution;
varying vec2 vUv;
void main() {
vec2 texel = vec2( 1.0 / resolution.x, 1.0 / resolution.y );
// kernel definition (in glsl matrices are filled in column-major order)
const mat3 Gx = mat3( -1, -2, -1, 0, 0, 0, 1, 2, 1 ); // x direction kernel
const mat3 Gy = mat3( -1, 0, 1, -2, 0, 2, -1, 0, 1 ); // y direction kernel
// fetch the 3x3 neighbourhood of a fragment
// first column
float tx0y0 = texture2D( tDiffuse, vUv + texel * vec2( -1, -1 ) ).r;
float tx0y1 = texture2D( tDiffuse, vUv + texel * vec2( -1, 0 ) ).r;
float tx0y2 = texture2D( tDiffuse, vUv + texel * vec2( -1, 1 ) ).r;
// second column
float tx1y0 = texture2D( tDiffuse, vUv + texel * vec2( 0, -1 ) ).r;
float tx1y1 = texture2D( tDiffuse, vUv + texel * vec2( 0, 0 ) ).r;
float tx1y2 = texture2D( tDiffuse, vUv + texel * vec2( 0, 1 ) ).r;
// third column
float tx2y0 = texture2D( tDiffuse, vUv + texel * vec2( 1, -1 ) ).r;
float tx2y1 = texture2D( tDiffuse, vUv + texel * vec2( 1, 0 ) ).r;
float tx2y2 = texture2D( tDiffuse, vUv + texel * vec2( 1, 1 ) ).r;
// gradient value in x direction
float valueGx = Gx[0][0] * tx0y0 + Gx[1][0] * tx1y0 + Gx[2][0] * tx2y0 +
Gx[0][1] * tx0y1 + Gx[1][1] * tx1y1 + Gx[2][1] * tx2y1 +
Gx[0][2] * tx0y2 + Gx[1][2] * tx1y2 + Gx[2][2] * tx2y2;
// gradient value in y direction
float valueGy = Gy[0][0] * tx0y0 + Gy[1][0] * tx1y0 + Gy[2][0] * tx2y0 +
Gy[0][1] * tx0y1 + Gy[1][1] * tx1y1 + Gy[2][1] * tx2y1 +
Gy[0][2] * tx0y2 + Gy[1][2] * tx1y2 + Gy[2][2] * tx2y2;
// magnitute of the total gradient
float G = sqrt( ( valueGx * valueGx ) + ( valueGy * valueGy ) );
gl_FragColor = vec4( vec3( G ), 1 );
}`
};
export { SobelOperatorShader };

View File

@ -0,0 +1,90 @@
import {
Color,
ShaderChunk,
ShaderLib,
UniformsUtils
} from 'three';
/**
* ------------------------------------------------------------------------------------------
* Subsurface Scattering shader
* Based on GDC 2011 Approximating Translucency for a Fast, Cheap and Convincing Subsurface Scattering Look
* https://colinbarrebrisebois.com/2011/03/07/gdc-2011-approximating-translucency-for-a-fast-cheap-and-convincing-subsurface-scattering-look/
*------------------------------------------------------------------------------------------
*/
// Replace every occurrence of `find` in `string` with `replace`.
// Implemented via split/join so both arguments are treated literally —
// no regex interpretation and no '$' substitution patterns, unlike
// String.prototype.replaceAll.
function replaceAll( string, find, replace ) {

	const segments = string.split( find );
	return segments.join( replace );

}
// Split the stock Phong fragment shader at 'void main() {' so the extra
// uniform declarations and the scattering function can be injected between
// the declarations ("pars") section and the main body.
const meshphong_frag_head = ShaderChunk[ 'meshphong_frag' ].slice( 0, ShaderChunk[ 'meshphong_frag' ].indexOf( 'void main() {' ) );
const meshphong_frag_body = ShaderChunk[ 'meshphong_frag' ].slice( ShaderChunk[ 'meshphong_frag' ].indexOf( 'void main() {' ) );
/**
 * Phong shader extended with a fast approximate subsurface-scattering term
 * (GDC 2011, Barré-Brisebois & Bouchard — see the link in the header above).
 * The scattering contribution is injected after every RE_Direct() call by
 * patching the 'lights_fragment_begin' chunk inside the Phong main body.
 */
const SubsurfaceScatteringShader = {
name: 'SubsurfaceScatteringShader',
uniforms: UniformsUtils.merge( [
ShaderLib[ 'phong' ].uniforms,
{
'thicknessMap': { value: null }, // per-texel thickness, red channel
'thicknessColor': { value: new Color( 0xffffff ) }, // tint of transmitted light
'thicknessDistortion': { value: 0.1 }, // how much the normal bends the light vector
'thicknessAmbient': { value: 0.0 }, // view-independent scattering floor
'thicknessAttenuation': { value: 0.1 }, // overall scattering strength
'thicknessPower': { value: 2.0 }, // exponent shaping the back-light lobe
'thicknessScale': { value: 10.0 } // multiplier on the lobe
}
] ),
vertexShader: [
'#define USE_UV',
ShaderChunk[ 'meshphong_vert' ],
].join( '\n' ),
fragmentShader: [
'#define USE_UV',
'#define SUBSURFACE',
meshphong_frag_head,
'uniform sampler2D thicknessMap;',
'uniform float thicknessPower;',
'uniform float thicknessScale;',
'uniform float thicknessDistortion;',
'uniform float thicknessAmbient;',
'uniform float thicknessAttenuation;',
'uniform vec3 thicknessColor;',
'void RE_Direct_Scattering(const in IncidentLight directLight, const in vec2 uv, const in vec3 geometryPosition, const in vec3 geometryNormal, const in vec3 geometryViewDir, const in vec3 geometryClearcoatNormal, inout ReflectedLight reflectedLight) {',
' vec3 thickness = thicknessColor * texture2D(thicknessMap, uv).r;',
' vec3 scatteringHalf = normalize(directLight.direction + (geometryNormal * thicknessDistortion));',
' float scatteringDot = pow(saturate(dot(geometryViewDir, -scatteringHalf)), thicknessPower) * thicknessScale;',
' vec3 scatteringIllu = (scatteringDot + thicknessAmbient) * thickness;',
' reflectedLight.directDiffuse += scatteringIllu * thicknessAttenuation * directLight.color;',
'}',
meshphong_frag_body.replace( '#include <lights_fragment_begin>',
replaceAll(
ShaderChunk[ 'lights_fragment_begin' ],
'RE_Direct( directLight, geometryPosition, geometryNormal, geometryViewDir, geometryClearcoatNormal, material, reflectedLight );',
[
'RE_Direct( directLight, geometryPosition, geometryNormal, geometryViewDir, geometryClearcoatNormal, material, reflectedLight );',
'#if defined( SUBSURFACE ) && defined( USE_UV )',
' RE_Direct_Scattering(directLight, vUv, geometryPosition, geometryNormal, geometryViewDir, geometryClearcoatNormal, reflectedLight);',
'#endif',
].join( '\n' )
),
),
].join( '\n' ),
};
export { SubsurfaceScatteringShader };

View File

@ -0,0 +1,45 @@
/**
 * Technicolor Shader
 * Simulates the look of the two-strip technicolor process popular in early 20th century films.
 * More historical info here: http://www.widescreenmuseum.com/oldcolor/technicolor1.htm
 * Demo here: http://charliehoey.com/technicolor_shader/shader_test.html
 */

// Pass-through vertex stage: forward the fullscreen-quad UVs.
const technicolorVertex = /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`;

// Fragment stage: keep the red channel, collapse green and blue to their
// average (the two-strip look), and force full opacity.
const technicolorFragment = /* glsl */`
uniform sampler2D tDiffuse;
varying vec2 vUv;
void main() {
vec4 tex = texture2D( tDiffuse, vec2( vUv.x, vUv.y ) );
vec4 newTex = vec4(tex.r, (tex.g + tex.b) * .5, (tex.g + tex.b) * .5, 1.0);
gl_FragColor = newTex;
}`;

const TechnicolorShader = {

	name: 'TechnicolorShader',

	uniforms: {
		'tDiffuse': { value: null } // input render target
	},

	vertexShader: technicolorVertex,

	fragmentShader: technicolorFragment

};

export { TechnicolorShader };

View File

@ -0,0 +1,326 @@
import {
Color,
Vector3
} from 'three';
/**
* Currently contains:
*
* toon1
* toon2
* hatching
* dotted
*/
/**
 * ToonShader1
 *
 * Two-tone cel shader: one directional light plus an ambient term drive an
 * intensity value that is thresholded at 0.5 to pick between a darkened and
 * a lightened version of 'uBaseColor'. A refraction-based term in the vertex
 * stage adds a view-dependent rim contribution.
 */
const ToonShader1 = {
uniforms: {
'uDirLightPos': { value: new Vector3() }, // directional light position (world space)
'uDirLightColor': { value: new Color( 0xeeeeee ) }, // directional light color
'uAmbientLightColor': { value: new Color( 0x050505 ) }, // ambient light color
'uBaseColor': { value: new Color( 0xffffff ) } // base surface color
},
vertexShader: /* glsl */`
varying vec3 vNormal;
varying vec3 vRefract;
void main() {
vec4 worldPosition = modelMatrix * vec4( position, 1.0 );
vec4 mvPosition = modelViewMatrix * vec4( position, 1.0 );
vec3 worldNormal = normalize ( mat3( modelMatrix[0].xyz, modelMatrix[1].xyz, modelMatrix[2].xyz ) * normal );
vNormal = normalize( normalMatrix * normal );
vec3 I = worldPosition.xyz - cameraPosition;
vRefract = refract( normalize( I ), worldNormal, 1.02 );
gl_Position = projectionMatrix * mvPosition;
}`,
fragmentShader: /* glsl */`
uniform vec3 uBaseColor;
uniform vec3 uDirLightPos;
uniform vec3 uDirLightColor;
uniform vec3 uAmbientLightColor;
varying vec3 vNormal;
varying vec3 vRefract;
void main() {
float directionalLightWeighting = max( dot( normalize( vNormal ), uDirLightPos ), 0.0);
vec3 lightWeighting = uAmbientLightColor + uDirLightColor * directionalLightWeighting;
float intensity = smoothstep( - 0.5, 1.0, pow( length(lightWeighting), 20.0 ) );
intensity += length(lightWeighting) * 0.2;
float cameraWeighting = dot( normalize( vNormal ), vRefract );
intensity += pow( 1.0 - length( cameraWeighting ), 6.0 );
intensity = intensity * 0.2 + 0.3;
if ( intensity < 0.50 ) {
gl_FragColor = vec4( 2.0 * intensity * uBaseColor, 1.0 );
} else {
gl_FragColor = vec4( 1.0 - 2.0 * ( 1.0 - intensity ) * ( 1.0 - uBaseColor ), 1.0 );
}
#include <colorspace_fragment>
}`
};
/**
 * ToonShader2
 *
 * Cel shader with darkening bands: starts from 'uBaseColor' and multiplies in
 * 'uLineColor1' / 'uLineColor2' when the light-dependent and view-dependent
 * intensities fall below fixed thresholds. uLineColor3/uLineColor4 are
 * declared for interface parity with the other toon shaders but are not read
 * in this fragment body.
 */
const ToonShader2 = {
uniforms: {
'uDirLightPos': { value: new Vector3() }, // directional light position (world space)
'uDirLightColor': { value: new Color( 0xeeeeee ) }, // directional light color
'uAmbientLightColor': { value: new Color( 0x050505 ) }, // ambient light color
'uBaseColor': { value: new Color( 0xeeeeee ) }, // base surface color
'uLineColor1': { value: new Color( 0x808080 ) }, // first darkening band tint
'uLineColor2': { value: new Color( 0x000000 ) }, // second darkening band tint
'uLineColor3': { value: new Color( 0x000000 ) }, // unused in this shader
'uLineColor4': { value: new Color( 0x000000 ) } // unused in this shader
},
vertexShader: /* glsl */`
varying vec3 vNormal;
void main() {
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
vNormal = normalize( normalMatrix * normal );
}`,
fragmentShader: /* glsl */`
uniform vec3 uBaseColor;
uniform vec3 uLineColor1;
uniform vec3 uLineColor2;
uniform vec3 uLineColor3;
uniform vec3 uLineColor4;
uniform vec3 uDirLightPos;
uniform vec3 uDirLightColor;
uniform vec3 uAmbientLightColor;
varying vec3 vNormal;
void main() {
float camera = max( dot( normalize( vNormal ), vec3( 0.0, 0.0, 1.0 ) ), 0.4);
float light = max( dot( normalize( vNormal ), uDirLightPos ), 0.0);
gl_FragColor = vec4( uBaseColor, 1.0 );
if ( length(uAmbientLightColor + uDirLightColor * light) < 1.00 ) {
gl_FragColor *= vec4( uLineColor1, 1.0 );
}
if ( length(uAmbientLightColor + uDirLightColor * camera) < 0.50 ) {
gl_FragColor *= vec4( uLineColor2, 1.0 );
}
#include <colorspace_fragment>
}`
};
/**
 * ToonShaderHatching
 *
 * Cross-hatching cel shader: as the light intensity drops below successive
 * thresholds (1.0, 0.75, 0.5, 0.3465), diagonal screen-space hatch lines in
 * uLineColor1..4 are drawn on top of 'uBaseColor'. Line placement uses
 * mod() of gl_FragCoord, so the hatching is fixed in screen space.
 */
const ToonShaderHatching = {
uniforms: {
'uDirLightPos': { value: new Vector3() }, // directional light position (world space)
'uDirLightColor': { value: new Color( 0xeeeeee ) }, // directional light color
'uAmbientLightColor': { value: new Color( 0x050505 ) }, // ambient light color
'uBaseColor': { value: new Color( 0xffffff ) }, // base surface color
'uLineColor1': { value: new Color( 0x000000 ) }, // hatch color, first band
'uLineColor2': { value: new Color( 0x000000 ) }, // hatch color, second band
'uLineColor3': { value: new Color( 0x000000 ) }, // hatch color, third band
'uLineColor4': { value: new Color( 0x000000 ) } // hatch color, fourth band
},
vertexShader: /* glsl */`
varying vec3 vNormal;
void main() {
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
vNormal = normalize( normalMatrix * normal );
}`,
fragmentShader: /* glsl */`
uniform vec3 uBaseColor;
uniform vec3 uLineColor1;
uniform vec3 uLineColor2;
uniform vec3 uLineColor3;
uniform vec3 uLineColor4;
uniform vec3 uDirLightPos;
uniform vec3 uDirLightColor;
uniform vec3 uAmbientLightColor;
varying vec3 vNormal;
void main() {
float directionalLightWeighting = max( dot( normalize(vNormal), uDirLightPos ), 0.0);
vec3 lightWeighting = uAmbientLightColor + uDirLightColor * directionalLightWeighting;
gl_FragColor = vec4( uBaseColor, 1.0 );
if ( length(lightWeighting) < 1.00 ) {
if ( mod(gl_FragCoord.x + gl_FragCoord.y, 10.0) == 0.0) {
gl_FragColor = vec4( uLineColor1, 1.0 );
}
}
if ( length(lightWeighting) < 0.75 ) {
if (mod(gl_FragCoord.x - gl_FragCoord.y, 10.0) == 0.0) {
gl_FragColor = vec4( uLineColor2, 1.0 );
}
}
if ( length(lightWeighting) < 0.50 ) {
if (mod(gl_FragCoord.x + gl_FragCoord.y - 5.0, 10.0) == 0.0) {
gl_FragColor = vec4( uLineColor3, 1.0 );
}
}
if ( length(lightWeighting) < 0.3465 ) {
if (mod(gl_FragCoord.x - gl_FragCoord.y - 5.0, 10.0) == 0.0) {
gl_FragColor = vec4( uLineColor4, 1.0 );
}
}
#include <colorspace_fragment>
}`
};
/**
 * ToonShaderDotted
 *
 * Halftone-style cel shader: below two light-intensity thresholds, screen-space
 * dot patterns (mod() of gl_FragCoord on a 4-pixel grid, the second band
 * offset by 2 pixels) are drawn in 'uLineColor1' over 'uBaseColor'.
 * uLineColor2..4 are declared in the fragment source for parity with the
 * other toon shaders but only uLineColor1 is supplied and read here.
 */
const ToonShaderDotted = {
uniforms: {
'uDirLightPos': { value: new Vector3() }, // directional light position (world space)
'uDirLightColor': { value: new Color( 0xeeeeee ) }, // directional light color
'uAmbientLightColor': { value: new Color( 0x050505 ) }, // ambient light color
'uBaseColor': { value: new Color( 0xffffff ) }, // base surface color
'uLineColor1': { value: new Color( 0x000000 ) } // dot color
},
vertexShader: /* glsl */`
varying vec3 vNormal;
void main() {
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
vNormal = normalize( normalMatrix * normal );
}`,
fragmentShader: /* glsl */`
uniform vec3 uBaseColor;
uniform vec3 uLineColor1;
uniform vec3 uLineColor2;
uniform vec3 uLineColor3;
uniform vec3 uLineColor4;
uniform vec3 uDirLightPos;
uniform vec3 uDirLightColor;
uniform vec3 uAmbientLightColor;
varying vec3 vNormal;
void main() {
float directionalLightWeighting = max( dot( normalize(vNormal), uDirLightPos ), 0.0);
vec3 lightWeighting = uAmbientLightColor + uDirLightColor * directionalLightWeighting;
gl_FragColor = vec4( uBaseColor, 1.0 );
if ( length(lightWeighting) < 1.00 ) {
if ( ( mod(gl_FragCoord.x, 4.001) + mod(gl_FragCoord.y, 4.0) ) > 6.00 ) {
gl_FragColor = vec4( uLineColor1, 1.0 );
}
}
if ( length(lightWeighting) < 0.50 ) {
if ( ( mod(gl_FragCoord.x + 2.0, 4.001) + mod(gl_FragCoord.y + 2.0, 4.0) ) > 6.00 ) {
gl_FragColor = vec4( uLineColor1, 1.0 );
}
}
#include <colorspace_fragment>
}`
};
export { ToonShader1, ToonShader2, ToonShaderHatching, ToonShaderDotted };

View File

@ -0,0 +1,74 @@
import {
Vector2
} from 'three';
/**
 * Triangle blur shader
 * based on glfx.js triangle blur shader
 * https://github.com/evanw/glfx.js
 *
 * A basic blur filter, which convolves the image with a
 * pyramid filter. The pyramid filter is separable and is applied as two
 * perpendicular triangle filters.
 *
 * 'delta' is the blur direction/radius in UV units; run the pass twice with
 * perpendicular deltas for the full pyramid blur. A per-fragment random
 * offset (rand() from <common>) jitters the 21 sample positions to hide
 * banding from the fixed sample count.
 *
 * NOTE(review): the uniform is named 'texture', which is a reserved built-in
 * function name in GLSL ES 3.0 / WebGL 2 — confirm this pass is only ever
 * compiled as GLSL ES 1.0 before reusing it elsewhere.
 */
const TriangleBlurShader = {
name: 'TriangleBlurShader',
uniforms: {
'texture': { value: null }, // input render target
'delta': { value: new Vector2( 1, 1 ) } // blur direction & radius in UV space
},
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`,
fragmentShader: /* glsl */`
#include <common>
#define ITERATIONS 10.0
uniform sampler2D texture;
uniform vec2 delta;
varying vec2 vUv;
void main() {
vec4 color = vec4( 0.0 );
float total = 0.0;
// randomize the lookup values to hide the fixed number of samples
float offset = rand( vUv );
for ( float t = -ITERATIONS; t <= ITERATIONS; t ++ ) {
float percent = ( t + offset - 0.5 ) / ITERATIONS;
float weight = 1.0 - abs( percent );
color += texture2D( texture, vUv + delta * percent ) * weight;
total += weight;
}
gl_FragColor = color / total;
}`
};
export { TriangleBlurShader };

View File

@ -0,0 +1,47 @@
/**
 * Unpack RGBA depth shader
 * - show RGBA encoded depth as monochrome color
 */

// Pass-through vertex stage.
const unpackDepthVertex = /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`;

// Decode the RGBA-packed depth with the helper from the three.js <packing>
// chunk, invert it, and emit it as a grayscale color with 'opacity' alpha.
const unpackDepthFragment = /* glsl */`
uniform float opacity;
uniform sampler2D tDiffuse;
varying vec2 vUv;
#include <packing>
void main() {
float depth = 1.0 - unpackRGBAToDepth( texture2D( tDiffuse, vUv ) );
gl_FragColor = vec4( vec3( depth ), opacity );
}`;

const UnpackDepthRGBAShader = {

	name: 'UnpackDepthRGBAShader',

	uniforms: {
		'tDiffuse': { value: null }, // RGBA-packed depth texture
		'opacity': { value: 1.0 } // output alpha
	},

	vertexShader: unpackDepthVertex,

	fragmentShader: unpackDepthFragment

};

export { UnpackDepthRGBAShader };

View File

@ -0,0 +1,130 @@
import {
UniformsLib,
UniformsUtils,
Matrix4
} from 'three';
/**
 * Mesh Velocity Shader @bhouston
 *
 * Writes per-fragment screen-space velocity: each vertex is projected with
 * both the current and the previous frame's model / projection-view matrices,
 * and the NDC delta is remapped to [0,1] and packed into two RG pairs
 * (vel.x -> RG, vel.y -> BA) via packDepthToRG from <packing>.
 *
 * Fix: the original vertex shader included <morphtarget_vertex> twice
 * (before and after <displacementmap_vertex>), which would add the morph
 * target deltas to 'transformed' a second time; the duplicate is removed.
 */
const VelocityShader = {
name: 'VelocityShader',
uniforms: UniformsUtils.merge( [
UniformsLib.common,
UniformsLib.displacementmap,
{
modelMatrixPrev: { value: new Matrix4() }, // model matrix from the previous frame
currentProjectionViewMatrix: { value: new Matrix4() }, // projection * view, current frame
previousProjectionViewMatrix: { value: new Matrix4() } // projection * view, previous frame
}
] ),
vertexShader: /* glsl */`
#define NORMAL
#if defined( FLAT_SHADED ) || defined( USE_BUMPMAP ) || defined( USE_NORMALMAP_TANGENTSPACE )
varying vec3 vViewPosition;
#endif
#include <common>
#include <packing>
#include <uv_pars_vertex>
#include <displacementmap_pars_vertex>
#include <normal_pars_vertex>
#include <morphtarget_pars_vertex>
#include <skinning_pars_vertex>
#include <logdepthbuf_pars_vertex>
#include <clipping_planes_pars_vertex>
uniform mat4 previousProjectionViewMatrix;
uniform mat4 currentProjectionViewMatrix;
uniform mat4 modelMatrixPrev;
varying vec4 clipPositionCurrent;
varying vec4 clipPositionPrevious;
void main() {
#include <uv_vertex>
#include <beginnormal_vertex>
#include <morphnormal_vertex>
#include <skinbase_vertex>
#include <skinnormal_vertex>
#include <defaultnormal_vertex>
#include <normal_vertex>
#include <begin_vertex>
#include <morphtarget_vertex>
#include <displacementmap_vertex>
#include <skinning_vertex>
#ifdef USE_SKINNING
vec4 mvPosition = modelViewMatrix * skinned;
clipPositionCurrent = currentProjectionViewMatrix * modelMatrix * skinned;
clipPositionPrevious = previousProjectionViewMatrix * modelMatrixPrev * skinned;
#else
vec4 mvPosition = modelViewMatrix * vec4( transformed, 1.0 );
clipPositionCurrent = currentProjectionViewMatrix * modelMatrix * vec4( transformed, 1.0 );
clipPositionPrevious = previousProjectionViewMatrix * modelMatrixPrev * vec4( transformed, 1.0 );
#endif
gl_Position = projectionMatrix * mvPosition;
#include <logdepthbuf_vertex>
#include <clipping_planes_vertex>
}
`,
fragmentShader: /* glsl */`
#define NORMAL
uniform float opacity;
#include <packing>
#include <uv_pars_fragment>
#include <map_pars_fragment>
#include <alphamap_pars_fragment>
#include <alphatest_pars_fragment>
#include <logdepthbuf_pars_fragment>
#include <clipping_planes_pars_fragment>
varying vec4 clipPositionCurrent;
varying vec4 clipPositionPrevious;
void main() {
vec4 diffuseColor = vec4( 1.0 );
diffuseColor.a = opacity;
#include <map_fragment>
#include <alphamap_fragment>
#include <alphatest_fragment>
vec2 ndcPositionCurrent = clipPositionCurrent.xy/clipPositionCurrent.w;
vec2 ndcPositionPrevious = clipPositionPrevious.xy/clipPositionPrevious.w;
vec2 vel = ( ndcPositionCurrent - ndcPositionPrevious ) * 0.5;
vel = vel * 0.5 + 0.5;
vec2 v1 = packDepthToRG(vel.x);
vec2 v2 = packDepthToRG(vel.y);
gl_FragColor = vec4(v1.x, v1.y, v2.x, v2.y);
#include <logdepthbuf_fragment>
}
`
};
export { VelocityShader };

View File

@ -0,0 +1,59 @@
/**
 * Two pass Gaussian blur filter (horizontal and vertical blur shaders)
 * - see http://www.cake23.de/traveling-wavefronts-lit-up.html
 *
 * - 9 samples per pass
 * - standard deviation 2.7
 * - "h" and "v" parameters should be set to "1 / width" and "1 / height"
 */

// Pass-through vertex stage.
const verticalBlurVertex = /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`;

// Vertical 9-tap Gaussian: taps at vUv.y +/- k*v, k = 0..4, with
// precomputed weights (sigma ~ 2.7) that sum to ~1.
const verticalBlurFragment = /* glsl */`
uniform sampler2D tDiffuse;
uniform float v;
varying vec2 vUv;
void main() {
vec4 sum = vec4( 0.0 );
sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y - 4.0 * v ) ) * 0.051;
sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y - 3.0 * v ) ) * 0.0918;
sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y - 2.0 * v ) ) * 0.12245;
sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y - 1.0 * v ) ) * 0.1531;
sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y ) ) * 0.1633;
sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y + 1.0 * v ) ) * 0.1531;
sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y + 2.0 * v ) ) * 0.12245;
sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y + 3.0 * v ) ) * 0.0918;
sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y + 4.0 * v ) ) * 0.051;
gl_FragColor = sum;
}`;

const VerticalBlurShader = {

	name: 'VerticalBlurShader',

	uniforms: {
		'tDiffuse': { value: null }, // input render target
		'v': { value: 1.0 / 512.0 } // vertical texel size: 1 / height
	},

	vertexShader: verticalBlurVertex,

	fragmentShader: verticalBlurFragment

};

export { VerticalBlurShader };

View File

@ -0,0 +1,63 @@
/**
 * Simple fake tilt-shift effect, modulating two pass Gaussian blur (see above) by vertical position
 *
 * - 9 samples per pass
 * - standard deviation 2.7
 * - "h" and "v" parameters should be set to "1 / width" and "1 / height"
 * - "r" parameter control where "focused" horizontal line lies
 */

// Pass-through vertex stage.
const tiltShiftVertex = /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`;

// Vertical 9-tap Gaussian whose step 'vv' scales with the distance from the
// focus line at vUv.y == r, so fragments near r stay sharp.
const tiltShiftFragment = /* glsl */`
uniform sampler2D tDiffuse;
uniform float v;
uniform float r;
varying vec2 vUv;
void main() {
vec4 sum = vec4( 0.0 );
float vv = v * abs( r - vUv.y );
sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y - 4.0 * vv ) ) * 0.051;
sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y - 3.0 * vv ) ) * 0.0918;
sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y - 2.0 * vv ) ) * 0.12245;
sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y - 1.0 * vv ) ) * 0.1531;
sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y ) ) * 0.1633;
sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y + 1.0 * vv ) ) * 0.1531;
sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y + 2.0 * vv ) ) * 0.12245;
sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y + 3.0 * vv ) ) * 0.0918;
sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y + 4.0 * vv ) ) * 0.051;
gl_FragColor = sum;
}`;

const VerticalTiltShiftShader = {

	name: 'VerticalTiltShiftShader',

	uniforms: {
		'tDiffuse': { value: null }, // input render target
		'v': { value: 1.0 / 512.0 }, // vertical texel size: 1 / height
		'r': { value: 0.35 } // focus line position in UV space (0..1)
	},

	vertexShader: tiltShiftVertex,

	fragmentShader: tiltShiftFragment

};

export { VerticalTiltShiftShader };

View File

@ -0,0 +1,51 @@
/**
 * Vignette shader
 * based on PaintEffect postprocess from ro.me
 * http://code.google.com/p/3-dreams-of-black/source/browse/deploy/js/effects/PaintEffect.js
 */

// Pass-through vertex stage.
const vignetteVertex = /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`;

// Fragment stage: darken toward the corners by mixing the texel with
// (1 - darkness) proportionally to the squared, offset-scaled distance
// from the screen center.
const vignetteFragment = /* glsl */`
uniform float offset;
uniform float darkness;
uniform sampler2D tDiffuse;
varying vec2 vUv;
void main() {
// Eskil's vignette
vec4 texel = texture2D( tDiffuse, vUv );
vec2 uv = ( vUv - vec2( 0.5 ) ) * vec2( offset );
gl_FragColor = vec4( mix( texel.rgb, vec3( 1.0 - darkness ), dot( uv, uv ) ), texel.a );
}`;

const VignetteShader = {

	name: 'VignetteShader',

	uniforms: {
		'tDiffuse': { value: null }, // input render target
		'offset': { value: 1.0 }, // vignette radius scale
		'darkness': { value: 1.0 } // vignette strength
	},

	vertexShader: vignetteVertex,

	fragmentShader: vignetteFragment

};

export { VignetteShader };

View File

@ -0,0 +1,289 @@
import {
Vector2,
Vector3
} from 'three';
/**
 * Shaders to render 3D volumes using raycasting.
 * The applied techniques are based on similar implementations in the Visvis and Vispy projects.
 * This is not the only approach, therefore it's marked 1.
 *
 * Two render styles are supported, selected by 'u_renderstyle':
 *   0 — maximum intensity projection (cast_mip)
 *   1 — isosurface with Blinn-Phong lighting (cast_iso)
 *
 * NOTE(review): in add_lighting() the local vec4s ambient_color / diffuse_color /
 * specular_color shadow the file-level consts of the same names, start at zero,
 * and the ambient/specular accumulation lines multiply by those same zero
 * locals — so only the diffuse (lambert) term actually contributes to the
 * final color. Confirm whether this matches the upstream intent before use.
 */
const VolumeRenderShader1 = {
uniforms: {
'u_size': { value: new Vector3( 1, 1, 1 ) }, // volume dimensions in voxels
'u_renderstyle': { value: 0 }, // 0: MIP, 1: isosurface
'u_renderthreshold': { value: 0.5 }, // iso threshold (isosurface style only)
'u_clim': { value: new Vector2( 1, 1 ) }, // contrast limits for the colormap
'u_data': { value: null }, // 3D intensity texture
'u_cmdata': { value: null } // 1D colormap texture (as 2D)
},
vertexShader: /* glsl */`
varying vec4 v_nearpos;
varying vec4 v_farpos;
varying vec3 v_position;
void main() {
// Prepare transforms to map to "camera view". See also:
// https://threejs.org/docs/#api/renderers/webgl/WebGLProgram
mat4 viewtransformf = modelViewMatrix;
mat4 viewtransformi = inverse(modelViewMatrix);
// Project local vertex coordinate to camera position. Then do a step
// backward (in cam coords) to the near clipping plane, and project back. Do
// the same for the far clipping plane. This gives us all the information we
// need to calculate the ray and truncate it to the viewing cone.
vec4 position4 = vec4(position, 1.0);
vec4 pos_in_cam = viewtransformf * position4;
// Intersection of ray and near clipping plane (z = -1 in clip coords)
pos_in_cam.z = -pos_in_cam.w;
v_nearpos = viewtransformi * pos_in_cam;
// Intersection of ray and far clipping plane (z = +1 in clip coords)
pos_in_cam.z = pos_in_cam.w;
v_farpos = viewtransformi * pos_in_cam;
// Set varyings and output pos
v_position = position;
gl_Position = projectionMatrix * viewMatrix * modelMatrix * position4;
}`,
fragmentShader: /* glsl */`
precision highp float;
precision mediump sampler3D;
uniform vec3 u_size;
uniform int u_renderstyle;
uniform float u_renderthreshold;
uniform vec2 u_clim;
uniform sampler3D u_data;
uniform sampler2D u_cmdata;
varying vec3 v_position;
varying vec4 v_nearpos;
varying vec4 v_farpos;
// The maximum distance through our rendering volume is sqrt(3).
const int MAX_STEPS = 887; // 887 for 512^3, 1774 for 1024^3
const int REFINEMENT_STEPS = 4;
const float relative_step_size = 1.0;
const vec4 ambient_color = vec4(0.2, 0.4, 0.2, 1.0);
const vec4 diffuse_color = vec4(0.8, 0.2, 0.2, 1.0);
const vec4 specular_color = vec4(1.0, 1.0, 1.0, 1.0);
const float shininess = 40.0;
void cast_mip(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray);
void cast_iso(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray);
float sample1(vec3 texcoords);
vec4 apply_colormap(float val);
vec4 add_lighting(float val, vec3 loc, vec3 step, vec3 view_ray);
void main() {
// Normalize clipping plane info
vec3 farpos = v_farpos.xyz / v_farpos.w;
vec3 nearpos = v_nearpos.xyz / v_nearpos.w;
// Calculate unit vector pointing in the view direction through this fragment.
vec3 view_ray = normalize(nearpos.xyz - farpos.xyz);
// Compute the (negative) distance to the front surface or near clipping plane.
// v_position is the back face of the cuboid, so the initial distance calculated in the dot
// product below is the distance from near clip plane to the back of the cuboid
float distance = dot(nearpos - v_position, view_ray);
distance = max(distance, min((-0.5 - v_position.x) / view_ray.x,
(u_size.x - 0.5 - v_position.x) / view_ray.x));
distance = max(distance, min((-0.5 - v_position.y) / view_ray.y,
(u_size.y - 0.5 - v_position.y) / view_ray.y));
distance = max(distance, min((-0.5 - v_position.z) / view_ray.z,
(u_size.z - 0.5 - v_position.z) / view_ray.z));
// Now we have the starting position on the front surface
vec3 front = v_position + view_ray * distance;
// Decide how many steps to take
int nsteps = int(-distance / relative_step_size + 0.5);
if ( nsteps < 1 )
discard;
// Get starting location and step vector in texture coordinates
vec3 step = ((v_position - front) / u_size) / float(nsteps);
vec3 start_loc = front / u_size;
// For testing: show the number of steps. This helps to establish
// whether the rays are correctly oriented
//'gl_FragColor = vec4(0.0, float(nsteps) / 1.0 / u_size.x, 1.0, 1.0);
//'return;
if (u_renderstyle == 0)
cast_mip(start_loc, step, nsteps, view_ray);
else if (u_renderstyle == 1)
cast_iso(start_loc, step, nsteps, view_ray);
if (gl_FragColor.a < 0.05)
discard;
}
float sample1(vec3 texcoords) {
/* Sample float value from a 3D texture. Assumes intensity data. */
return texture(u_data, texcoords.xyz).r;
}
vec4 apply_colormap(float val) {
val = (val - u_clim[0]) / (u_clim[1] - u_clim[0]);
return texture2D(u_cmdata, vec2(val, 0.5));
}
void cast_mip(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray) {
float max_val = -1e6;
int max_i = 100;
vec3 loc = start_loc;
// Enter the raycasting loop. In WebGL 1 the loop index cannot be compared with
// non-constant expression. So we use a hard-coded max, and an additional condition
// inside the loop.
for (int iter=0; iter<MAX_STEPS; iter++) {
if (iter >= nsteps)
break;
// Sample from the 3D texture
float val = sample1(loc);
// Apply MIP operation
if (val > max_val) {
max_val = val;
max_i = iter;
}
// Advance location deeper into the volume
loc += step;
}
// Refine location, gives crispier images
vec3 iloc = start_loc + step * (float(max_i) - 0.5);
vec3 istep = step / float(REFINEMENT_STEPS);
for (int i=0; i<REFINEMENT_STEPS; i++) {
max_val = max(max_val, sample1(iloc));
iloc += istep;
}
// Resolve final color
gl_FragColor = apply_colormap(max_val);
}
void cast_iso(vec3 start_loc, vec3 step, int nsteps, vec3 view_ray) {
gl_FragColor = vec4(0.0); // init transparent
vec4 color3 = vec4(0.0); // final color
vec3 dstep = 1.5 / u_size; // step to sample derivative
vec3 loc = start_loc;
float low_threshold = u_renderthreshold - 0.02 * (u_clim[1] - u_clim[0]);
// Enter the raycasting loop. In WebGL 1 the loop index cannot be compared with
// non-constant expression. So we use a hard-coded max, and an additional condition
// inside the loop.
for (int iter=0; iter<MAX_STEPS; iter++) {
if (iter >= nsteps)
break;
// Sample from the 3D texture
float val = sample1(loc);
if (val > low_threshold) {
// Take the last interval in smaller steps
vec3 iloc = loc - 0.5 * step;
vec3 istep = step / float(REFINEMENT_STEPS);
for (int i=0; i<REFINEMENT_STEPS; i++) {
val = sample1(iloc);
if (val > u_renderthreshold) {
gl_FragColor = add_lighting(val, iloc, dstep, view_ray);
return;
}
iloc += istep;
}
}
// Advance location deeper into the volume
loc += step;
}
}
vec4 add_lighting(float val, vec3 loc, vec3 step, vec3 view_ray)
{
// Calculate color by incorporating lighting
// View direction
vec3 V = normalize(view_ray);
// calculate normal vector from gradient
vec3 N;
float val1, val2;
val1 = sample1(loc + vec3(-step[0], 0.0, 0.0));
val2 = sample1(loc + vec3(+step[0], 0.0, 0.0));
N[0] = val1 - val2;
val = max(max(val1, val2), val);
val1 = sample1(loc + vec3(0.0, -step[1], 0.0));
val2 = sample1(loc + vec3(0.0, +step[1], 0.0));
N[1] = val1 - val2;
val = max(max(val1, val2), val);
val1 = sample1(loc + vec3(0.0, 0.0, -step[2]));
val2 = sample1(loc + vec3(0.0, 0.0, +step[2]));
N[2] = val1 - val2;
val = max(max(val1, val2), val);
float gm = length(N); // gradient magnitude
N = normalize(N);
// Flip normal so it points towards viewer
float Nselect = float(dot(N, V) > 0.0);
N = (2.0 * Nselect - 1.0) * N; // == Nselect * N - (1.0-Nselect)*N;
// Init colors
vec4 ambient_color = vec4(0.0, 0.0, 0.0, 0.0);
vec4 diffuse_color = vec4(0.0, 0.0, 0.0, 0.0);
vec4 specular_color = vec4(0.0, 0.0, 0.0, 0.0);
// note: could allow multiple lights
for (int i=0; i<1; i++)
{
// Get light direction (make sure to prevent zero devision)
vec3 L = normalize(view_ray); //lightDirs[i];
float lightEnabled = float( length(L) > 0.0 );
L = normalize(L + (1.0 - lightEnabled));
// Calculate lighting properties
float lambertTerm = clamp(dot(N, L), 0.0, 1.0);
vec3 H = normalize(L+V); // Halfway vector
float specularTerm = pow(max(dot(H, N), 0.0), shininess);
// Calculate mask
float mask1 = lightEnabled;
// Calculate colors
ambient_color += mask1 * ambient_color; // * gl_LightSource[i].ambient;
diffuse_color += mask1 * lambertTerm;
specular_color += mask1 * specularTerm * specular_color;
}
// Calculate final color by componing different components
vec4 final_color;
vec4 color = apply_colormap(val);
final_color = color * (ambient_color + diffuse_color) + specular_color;
final_color.a = color.a;
return final_color;
}`
};
export { VolumeRenderShader1 };

View File

@ -0,0 +1,95 @@
/**
 * Water refraction shader.
 *
 * Distorts the projected refraction texture with an animated dudv map to
 * fake rippling water, then tints the result with 'color' via overlay
 * blending. 'textureMatrix' projects world positions into the refraction
 * render target's UV space.
 */

// Vertex stage: forward plain UVs plus the projective refraction coordinate.
const waterRefractionVertex = /* glsl */`
uniform mat4 textureMatrix;
varying vec2 vUv;
varying vec4 vUvRefraction;
void main() {
vUv = uv;
vUvRefraction = textureMatrix * vec4( position, 1.0 );
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}`;

// Fragment stage: two chained dudv lookups produce the ripple offset, which
// perturbs the projective sample of the refraction texture; overlay blending
// applies the water tint.
const waterRefractionFragment = /* glsl */`
uniform vec3 color;
uniform float time;
uniform sampler2D tDiffuse;
uniform sampler2D tDudv;
varying vec2 vUv;
varying vec4 vUvRefraction;
float blendOverlay( float base, float blend ) {
return( base < 0.5 ? ( 2.0 * base * blend ) : ( 1.0 - 2.0 * ( 1.0 - base ) * ( 1.0 - blend ) ) );
}
vec3 blendOverlay( vec3 base, vec3 blend ) {
return vec3( blendOverlay( base.r, blend.r ), blendOverlay( base.g, blend.g ),blendOverlay( base.b, blend.b ) );
}
void main() {
float waveStrength = 0.5;
float waveSpeed = 0.03;
// simple distortion (ripple) via dudv map (see https://www.youtube.com/watch?v=6B7IF6GOu7s)
vec2 distortedUv = texture2D( tDudv, vec2( vUv.x + time * waveSpeed, vUv.y ) ).rg * waveStrength;
distortedUv = vUv.xy + vec2( distortedUv.x, distortedUv.y + time * waveSpeed );
vec2 distortion = ( texture2D( tDudv, distortedUv ).rg * 2.0 - 1.0 ) * waveStrength;
// new uv coords
vec4 uv = vec4( vUvRefraction );
uv.xy += distortion;
vec4 base = texture2DProj( tDiffuse, uv );
gl_FragColor = vec4( blendOverlay( base.rgb, color ), 1.0 );
#include <tonemapping_fragment>
#include <colorspace_fragment>
}`;

const WaterRefractionShader = {

	name: 'WaterRefractionShader',

	uniforms: {
		'color': { value: null }, // water tint (vec3/Color)
		'time': { value: 0 }, // animation time in seconds
		'tDiffuse': { value: null }, // refraction render target
		'tDudv': { value: null }, // dudv distortion map
		'textureMatrix': { value: null } // world -> refraction UV projection
	},

	vertexShader: waterRefractionVertex,

	fragmentShader: waterRefractionFragment

};

export { WaterRefractionShader };