
HTML

<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/88/three.min.js"></script>
<script id="vertexShader" type="x-shader/x-vertex">
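    // Pass-through vertex shader: the full-screen quad's vertices are already in clip space,
    // so they are forwarded unchanged and all of the work happens in the fragment shader.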
    void main() {
        gl_Position = vec4( position, 1.0 );
    }
</script>
<script id="fragmentShader" type="x-shader/x-fragment">
    uniform vec2 u_resolution;
    uniform float u_time;
    uniform vec2 u_mouse;
    uniform sampler2D u_noise;
    uniform sampler2D u_map;
    uniform sampler2D u_environment;
  
    #define PI 3.14159265359
    #define TAU 6.28318530718
    
    // Epsilon value
    const float eps = 0.005;
 
    const float scale = 0.9;
  
    const bool glitch = false; // whether to add the glitch effect
  
    // movement variables
    vec3 movement = vec3(.0);

    // Global variables for the raymarching algorithm.
    const int maxIterations = 512;
    const float stepScale = .4;
    const float stopThreshold = 0.001;
  
  
  mat4 rotationMatrix(vec3 axis, float angle)
  {
      axis = normalize(axis);
      float s = sin(angle);
      float c = cos(angle);
      float oc = 1.0 - c;

      return mat4(oc * axis.x * axis.x + c,           oc * axis.x * axis.y - axis.z * s,  oc * axis.z * axis.x + axis.y * s,  0.0,
                  oc * axis.x * axis.y + axis.z * s,  oc * axis.y * axis.y + c,           oc * axis.y * axis.z - axis.x * s,  0.0,
                  oc * axis.z * axis.x - axis.y * s,  oc * axis.y * axis.z + axis.x * s,  oc * axis.z * axis.z + c,           0.0,
                  0.0,                                0.0,                                0.0,                                1.0);
  }
  
  vec2 random2( vec2 p ) {
      return fract(sin(vec2(dot(p,vec2(127.1,311.7)),dot(p,vec2(269.5,183.3))))*43758.5453);
  }
  
  // The world!
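  // The scene SDF: twist the xy plane by an angle that depends on depth (and time),
  // repeat the space with mod(), then measure the distance to a thin cylinder running along z.
  // surfaceDetail passes the per-cell coordinates back out for texturing.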
  float world_sdf(in vec3 p, inout vec3 surfaceDetail) {
    float world = 10.; // World distance. Starting large
    float l, a, c, s; // length, angle, cos, sin
    vec3 _p; // P duplication
    float threads = 3.; // Number of threads (only relevant to the older stuff, now.)
    float z = p.z * 3.; // Z value, modified
    float t = sin(u_time) * .5 + 1.; // Time, modified
    
    c = cos(z * .59) * .8; // cos of depth
    s = sin(z * .6 + t * .5) * .8; // sin of depth, modified by time
    // The below is just a variation on a theme, using some FBM distortion instead.
    // c = cos(z * .5) * .8; // cos of depth
    // s = sin(z * .5) * .8; // sin of depth, modified by time
    // c *= cos(z * 1.5) * 1.1;
    
    p *= mat3( c, -s, 0., s, c, 0., 0., 0., 0. ); // rotate xy by a depth- and time-dependent angle (note the z component is zeroed here)
    _p = mod(p, .5);
    p = _p - (t * .1 + .1); // repeating the coordinates, standard stuff, but with a twist
    l = length(p.xy); // the length of the new coords
    world = (l) - .06; // the cylinder, radius .06
    surfaceDetail = _p;
    return world;
    
    
    
    
    
    // This stuff below is older
    // Leaving it here for posterity
    // -------------------------
    
    a = atan(p.y, p.x);
    l = length(p.xy);
    _p = vec3(cos(a * threads) * l, sin(a * threads) * l, p.z) - vec3(0., 0., 0.5);
    _p.x += 1. / threads;
    // vec3 _p = p - vec3(0., 0., 5.5);
    
    // _p *= 
    _p.x += cos(p.z * 3.) * .5;
    _p.y += sin(p.z * 3.) * .5;
    _p = (vec4(_p, 0.) * rotationMatrix(vec3(1., 0., 0.), 1.5704)).xyz;
    
    world = (length(mod(_p.xz, 2.) - 1.)) - .1;
    
    // _p *= vec3(1., 1., 1.);
    l = length(_p.xz);
    world = (l) - .1;
    
    return world;
  }
  float world_sdf(in vec3 p) {
    vec3 sd;
    return world_sdf(p, sd);
  }
  
  // Fuck yeah, normals!
  vec3 calculate_normal(in vec3 p)
  {
    
    float gradient_x = world_sdf(vec3(p.x + eps, p.y, p.z)) - world_sdf(vec3(p.x - eps, p.y, p.z));
    float gradient_y = world_sdf(vec3(p.x, p.y + eps, p.z)) - world_sdf(vec3(p.x, p.y - eps, p.z));
    float gradient_z = world_sdf(vec3(p.x, p.y, p.z  + eps)) - world_sdf(vec3(p.x, p.y, p.z - eps));
    
    vec3 normal = vec3(gradient_x, gradient_y, gradient_z);

    return normalize(normal);
  }

  // Raymarching.
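  // Classic sphere tracing: step along the ray by a scaled fraction of the distance
  // reported by the SDF, and stop once that distance drops below stopThreshold (a hit)
  // or the ray depth passes the far limit (a miss).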
  float rayMarching( vec3 origin, vec3 dir, float start, float end, inout float field, inout vec3 surfaceDetail ) {
    
    float sceneDist = 1e4;
    float rayDepth = start;
    for ( int i = 0; i < maxIterations; i++ ) {
      sceneDist = world_sdf( origin + dir * rayDepth, surfaceDetail ); // Distance from the point along the ray to the nearest surface point in the scene.

      if (( sceneDist < stopThreshold ) || (rayDepth >= end)) {        
        break;
      }
      // We haven't hit anything, so increase the depth by a scaled factor of the minimum scene distance.
      rayDepth += sceneDist * stepScale;
    }
  
    // If we never got within the stop threshold, treat it as a miss and clamp to the far plane;
    // otherwise nudge forward by the remaining distance to land on the surface.
    if ( sceneDist >= stopThreshold ) rayDepth = end;
    else rayDepth += sceneDist;

    return rayDepth;
  }

  // Based on original by IQ - optimized by Gary Warne
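  // Samples the SDF at a few increasing offsets along the normal; wherever nearby geometry
  // makes the SDF smaller than the offset, occlusion accumulates (with decreasing weight).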
  float calculateAO(vec3 p, vec3 n)
  {
     const float AO_SAMPLES = 5.0;
     float r = 0.0;
     float w = 1.0;
     for (float i=1.0; i<=AO_SAMPLES; i++)
     {
        float d0 = i * 0.15; // 1.0/AO_SAMPLES
        r += w * (d0 - world_sdf(p + n * d0));
        w *= 0.5;
     }
     return 1.0-clamp(r,0.0,1.0);
  }
  //   Naive environment mapping. Pass the reflected vector and pull back the texture position for that ray.
  vec3 envMap(vec3 rd, vec3 sn){

      // rd.xy -= movement;
      rd /= scale; // scale the whole thing down a bit from the scaled UVs
    
      vec3 col = texture2D(u_environment, rd.xy + sn.xy, 100.).rgb;
      col *= normalize(col);

      return col;

  }
  
  /**
   * Lighting
   * This stuff is way way better than the model I was using.
   * Courtesy Shane Warne
   * Reference: http://raymarching.com/
   * -------------------------------------
   * */
  
  // Lighting.
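  // One point light kept near the camera, cheap bump mapping from the map texture,
  // diffuse + a soft specular term, ambient occlusion, and a distance-based
  // fade to black (fog) driven by sceneAtten.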
  vec3 lighting( vec3 sp, vec3 camPos, int reflectionPass, float dist, float field, vec3 rd, vec3 surfaceDetail) {
    
    // Start with black.
    vec3 sceneColor = vec3(0.0);

    vec3 objColor = vec3(1.0, .5, .5);

    // Obtain the surface normal at the scene position "sp."
    vec3 tex = texture2D(u_map, vec2(length(surfaceDetail.xz * 1.), sp.z * 5.)).rgb;
    tex = (tex.rrr + tex.brg * .5) * .5;
    // tex += vec3(sin(sp.z * 200.) * .2 + .2);
    vec3 surfNormal = calculate_normal(sp) + tex * tex * (.5 + u_mouse.y); // adding the tex here for some cheap bump mapping
    
    // objColor = texture2D(u_map, sp. * .5).rgb;
    objColor = surfNormal * .5 + .5;
    objColor = tex;
    objColor += envMap(rd, surfNormal) * (.5 + u_mouse.x);

    // Lighting.

    // lp - Light position. Keeping it in the vicinity of the camera, but away from the objects in the scene.
    vec3 lp = vec3(0., 0.0, -1.0) + movement;
    // ld - Light direction.
    vec3 ld = lp-sp;
    // lcolor - Light color.
    vec3 lcolor = vec3(1.,0.97,0.92) * .3;
    
     // Light falloff (attenuation).
    float len = length( ld ); // Distance from the light to the surface point.
    ld /= len; // Normalizing the light-to-surface, aka light-direction, vector.
    float lightAtten = min( 1.0 / ( 0.15*len*len ), 1.0 ); // Removed light attenuation for this because I want the fade to white
    
    float sceneLen = length(camPos - sp); // Distance of the camera to the surface point
    float sceneAtten = min( 1.0 / ( 0.015*sceneLen*sceneLen ), 1.0 ); // Keeps things between 0 and 1.   

    // Obtain the reflected vector at the scene position "sp."
    vec3 ref = reflect(-ld, surfNormal);
    
    float ao = 1.0; // Ambient occlusion.
    ao = calculateAO(sp, surfNormal); // Ambient occlusion.

    float ambient = .5; //The object's ambient property.
    float specularPower = 1.; // The power of the specularity. Higher numbers can give the object a harder, shinier look.
    float diffuse = max( 0.0, dot(surfNormal, ld) ); //The object's diffuse value.
    float specular = max( 0.0, dot( ref, normalize(camPos-sp)) ); //The object's specular value.
    specular = pow(specular, specularPower); // Ramping up the specular value to the specular power for a bit of shininess.
    	
    // Bringing all the lighting components together to color the screen pixel.
    sceneColor += (objColor*(diffuse*0.8+ambient)+specular*0.5)*lcolor*lightAtten*1.3;
    sceneColor = mix(sceneColor, vec3(0.), 1.-sceneAtten*sceneAtten); // fog
    
    return sceneColor;

  }

    void main() {
      
      // Setting up our screen coordinates.
      vec2 aspect = vec2(u_resolution.x/u_resolution.y, 1.0); //
      vec2 uv = (2.0*gl_FragCoord.xy/u_resolution.xy - 1.0)*aspect;
      
      
      gl_FragColor += texture2D(u_noise, (uv.xy) * (u_resolution.x) + (u_time * 10.)).x * .1; // adding some noise
      
      // Adding some glitch
      if(glitch) {
        vec2 r = random2(vec2(uv.y / 100., u_time / .5));
        if(r.x > 0.966) {
          uv.x += r.y;
        }
      }
      
      // This just gives us a touch of fisheye
      // uv *= 1. + dot(uv, uv) * 0.4;
      
      // movement
      movement = vec3(0., 0., u_time * 1.);
      // movement = vec3(u_time / 5., 0., u_time / 1.);
      // movement = vec3(0., 0., u_time / 6.);
      // movement.x = cos(u_time / 8.) * 4.;
      // movement.y = sin(u_time / 8.) * 4.;
      
      // This is the point you look towards, or at, if you prefer.
      vec3 lookAt = vec3(0., 0., 1.);
      // vec3 lookAt = vec3(cos(u_time), 0., sin(u_time) * -2.); // This version of the look at just circles around.
      vec3 camera_position = vec3(cos(u_time * .25) * .15, sin(u_time * .5) * .15, -1.0);
      // vec3 camera_position = vec3(clamp(0.5 - u_mouse.x, -.5, .5), 0.5 + u_mouse.y - .5, -1.0); // This is the point you look from, or camera you look at the scene through. Whichever way you wish to look at it.
      
      lookAt += movement;
      // lookAt.z += sin(u_time / 10.) * .5;
      // lookAt.x += cos(u_time / 10.) * .5;
      camera_position += movement;
      
      vec3 forward = normalize(lookAt-camera_position); // Forward vector.
      vec3 right = normalize(vec3(forward.z, 0., -forward.x )); // Right vector... or is it left? Either way, so long as the correct-facing up-vector is produced.
      vec3 up = normalize(cross(forward,right)); // Cross product the two vectors above to get the up vector.

      // FOV - Field of view.
      float FOV = 0.3;

      // ro - Ray origin.
      vec3 ro = camera_position; 
      // rd - Ray direction.
      vec3 rd = normalize(forward + FOV*uv.x*right + FOV*uv.y*up);
      
      // Ray marching.
      const float clipNear = 0.0;
      const float clipFar = 16.0;
      vec3 surfaceDetail;
      float field = 0.;
      float dist = rayMarching(ro, rd, clipNear, clipFar, field, surfaceDetail );
      if ( dist >= clipFar ) {
        // gl_FragColor = vec4(vec3(0.), 1.0);
        // gl_FragColor += texture2D(u_noise, (uv + (u_time * 10.)) * 1.).x * .1; // adding some noise
        return;
      }

      // sp - Surface position. If we've made it this far, we've hit something.
      vec3 sp = ro + rd*dist;

      // Light the pixel that corresponds to the surface position. The reflectionPass argument (0)
      // indicates that this is the primary pass, not a reflection pass.
      vec3 sceneColor = lighting( sp, camera_position, 0, dist, field, rd, surfaceDetail);

      // Clamping the lit pixel, then put it on the screen.
      gl_FragColor += vec4(clamp(sceneColor, 0.0, 1.0), 1.0);
      
      
      // gl_FragColor *= gl_FragColor * 2.9; // this just adds some contrast, I've left it out because I like the more muted look
    }
</script>


<div id="container" touch-action="none"></div>

CSS

body {
  background: #000;
  margin: 0;
  padding: 0;
}

#container {
  position: fixed;
  touch-action: none;
}

JS

/*
Most of the stuff in here is just bootstrapping. Essentially it's just
setting Three.js up so that it renders a flat surface upon which to draw
the shader. The only thing to see here really is the uniforms sent to
the shader. Apart from that, all of the magic happens in the HTML panel,
in the fragment shader.
*/
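
// For reference, the uniforms the fragment shader expects (declared in the HTML panel):
//   u_time (float), u_resolution (vec2), u_mouse (vec2),
//   u_noise, u_map, u_environment (sampler2D textures).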

let container;
let camera, scene, renderer;
let uniforms;

let loader=new THREE.TextureLoader();
let texture, map, environment;
loader.setCrossOrigin("anonymous");
loader.load(
  'https://s3-us-west-2.amazonaws.com/s.cdpn.io/982762/noise.png',
  function do_something_with_texture(tex) {
    texture = tex;
    texture.wrapS = THREE.RepeatWrapping;
    texture.wrapT = THREE.RepeatWrapping;
    texture.minFilter = THREE.LinearFilter;
    loader.load(
      // 'https://s3-us-west-2.amazonaws.com/s.cdpn.io/982762/tiling-mosaic%202.jpg',
      'https://s3-us-west-2.amazonaws.com/s.cdpn.io/982762/Brick-2377.jpg',
      function do_something_with_texture(tex) {
        map = tex;
        map.wrapS = THREE.RepeatWrapping;
        map.wrapT = THREE.RepeatWrapping;
        map.minFilter = THREE.LinearFilter;
        loader.load( 
          'https://s3-us-west-2.amazonaws.com/s.cdpn.io/982762/env_lat-lon.png',
          function environment_load(tex) {
            environment = tex;
            environment.wrapS = THREE.RepeatWrapping;
            environment.wrapT = THREE.RepeatWrapping;
            environment.minFilter = THREE.LinearFilter;
            init();
            animate();
          }
        );
      });
  }
);

function init() {
  container = document.getElementById( 'container' );

  camera = new THREE.Camera();
  camera.position.z = 1;

  scene = new THREE.Scene();

  var geometry = new THREE.PlaneBufferGeometry( 2, 2 );

  uniforms = {
    u_time: { type: "f", value: 1.0 },
    u_resolution: { type: "v2", value: new THREE.Vector2() },
    u_noise: { type: "t", value: texture },
    u_map: { type: "t", value: map },
    u_environment: { type: "t", value: environment },
    u_mouse: { type: "v2", value: new THREE.Vector2() }
  };

  var material = new THREE.ShaderMaterial( {
    uniforms: uniforms,
    vertexShader: document.getElementById( 'vertexShader' ).textContent,
    fragmentShader: document.getElementById( 'fragmentShader' ).textContent
  } );
  material.extensions.derivatives = true;

  var mesh = new THREE.Mesh( geometry, material );
  scene.add( mesh );

  renderer = new THREE.WebGLRenderer();
  renderer.setPixelRatio( 1 );

  container.appendChild( renderer.domElement );

  onWindowResize();
  window.addEventListener( 'resize', onWindowResize, false );

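  // Track the pointer and map it into roughly [-0.5, 0.5] coordinates
  // (x corrected for aspect ratio, y flipped); passed to the shader as u_mouse.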
  document.addEventListener('pointermove', (e)=> {
    let ratio = window.innerHeight / window.innerWidth;
    uniforms.u_mouse.value.x = (e.pageX - window.innerWidth / 2) / window.innerWidth / ratio;
    uniforms.u_mouse.value.y = (e.pageY - window.innerHeight / 2) / window.innerHeight * -1;
    
    e.preventDefault();
  });
}

function onWindowResize( event ) {
  renderer.setSize( window.innerWidth, window.innerHeight );
  uniforms.u_resolution.value.x = renderer.domElement.width;
  uniforms.u_resolution.value.y = renderer.domElement.height;
}

function animate(delta) {
  requestAnimationFrame( animate );
  render(delta);
}



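// Optional frame capture via CCapture.js (assumes the library is loaded as an external
// script): press the 'd' key to start/stop recording the canvas to a WebM file.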
let capturer = new CCapture( { 
  verbose: true, 
  framerate: 60,
  motionBlurFrames: 4,
  quality: 90,
  format: 'webm',
  workersPath: 'js/'
 } );
let capturing = false;

function isCapturing(val) {
  if(val === false && capturing === true) {
    capturer.stop();
    capturer.save();
  } else if(val === true && capturing === false) {
    capturer.start();
  }
  capturing = val;
}
function toggleCapture() {
  isCapturing(!capturing);
}

window.addEventListener('keyup', function(e) { if(e.keyCode == 68) toggleCapture(); }); // 68 = 'd' key


function render(delta) {
  // "delta" is the requestAnimationFrame timestamp in milliseconds (undefined on the very first frame).
  uniforms.u_time.value = (delta || 0) * .0005 - 10000;
  renderer.render( scene, camera );

  if(capturing) {
    capturer.capture( renderer.domElement );
  }
}