const DURATION = 6; // Length of the generated audio loop, in seconds
// Size of the render target. One pixel encodes one stereo sample pair,
// so each rendered block yields WIDTH * HEIGHT audio samples.
const WIDTH = 512;
const HEIGHT = 512;
// GLSL fragment shader that synthesizes audio (Shadertoy "GPU sound" style):
// each fragment computes one stereo sample for its time t and packs the two
// 16-bit sample values into the RGBA8 output (low/high byte per channel).
const fragmentShader = `
precision mediump float;
uniform float iSampleRate;   // audio sample rate in Hz
uniform float iBlockOffset;  // start time (seconds) of this render block

// Triangle-ish oscillator; output range is [-1, 0].
float tri(in float freq, in float time) {
return -abs(1. - mod(freq * time * 2., 2.));
}

// Returns the stereo sample (left, right) for the given time in seconds.
vec2 mainSound( float time )
{
float freq = 440.;
// Step the pitch up by ~2 semitones (1.06^2) each second, repeating every 6 s.
freq *= pow(1.06 * 1.06, floor(mod(time, 6.)));
return vec2(
tri(freq, time) * sin(time * 3.141592),
tri(freq * 1.5, time) * sin(time * 3.141592)
);
}

void main() {
// Map this fragment to an absolute sample time.
// NOTE: the 512.0 row stride must match the render-target WIDTH.
float t = iBlockOffset + ((gl_FragCoord.x-0.5) + (gl_FragCoord.y-0.5)*512.0) / iSampleRate;
vec2 y = mainSound(t);
// Encode each channel as a 16-bit value split across two 8-bit outputs.
// Clamp to 65535: y == 1.0 would otherwise yield 65536, whose high byte
// (256/255) exceeds the 0..1 output range and decodes incorrectly.
vec2 v = clamp(floor((0.5+0.5*y)*65536.0), 0.0, 65535.0);
vec2 vl = mod(v,256.0)/255.0;
vec2 vh = floor(v/256.0)/255.0;
gl_FragColor = vec4(vl.x,vh.x,vl.y,vh.y);
}`;
// Create the audio context and a looping buffer-source node.
// Fall back to the prefixed constructor for older WebKit browsers.
const AudioContextCtor = window.AudioContext || window.webkitAudioContext;
const ctx = new AudioContextCtor();
const node = ctx.createBufferSource();
node.connect(ctx.destination);
node.loop = true; // replay the DURATION-second buffer indefinitely
// Stereo buffer holding DURATION seconds at the context's native sample rate.
const audioBuffer = ctx.createBuffer(2, ctx.sampleRate * DURATION, ctx.sampleRate);
// Offscreen canvas used as the WebGL drawing surface.
const canvas = document.createElement('canvas');
Object.assign(canvas, { width: WIDTH, height: HEIGHT });
const renderer = new THREE.WebGLRenderer({ canvas, alpha: true });
// Raw WebGL context, used later to read pixels back from the GPU.
const wctx = renderer.getContext();
// Full-screen quad setup: a 2x2 plane in front of an orthographic camera, so
// the fragment shader runs exactly once per render-target pixel.
const uniforms = {
  iBlockOffset: { type: 'f', value: 0.0 },
  iSampleRate: { type: 'f', value: ctx.sampleRate },
};
const scene = new THREE.Scene();
const camera = new THREE.OrthographicCamera(-1, 1, 1, -1, 0.1, 10);
camera.position.set(0, 0, 1);
camera.lookAt(scene.position);
const geometry = new THREE.PlaneGeometry(2, 2);
const material = new THREE.ShaderMaterial({ uniforms, fragmentShader });
const plane = new THREE.Mesh(geometry, material);
scene.add(plane);
// Offscreen buffer that the audio samples are rendered into.
const target = new THREE.WebGLRenderTarget(WIDTH, HEIGHT);
// Render the shader once per block and decode the pixels into audio samples.
const samples = WIDTH * HEIGHT;                 // samples produced per rendered block
const totalSamples = ctx.sampleRate * DURATION; // buffer length in sample frames
const numBlocks = Math.ceil(totalSamples / samples);
// Hoisted out of the loop: channel views and the pixel read-back buffer.
const outputDataL = audioBuffer.getChannelData(0);
const outputDataR = audioBuffer.getChannelData(1);
const pixels = new Uint8Array(WIDTH * HEIGHT * 4);
for (let i = 0; i < numBlocks; i++) {
  // Tell the shader which time slice this block covers, then render it.
  uniforms.iBlockOffset.value = (i * samples) / ctx.sampleRate;
  renderer.render(scene, camera, target, true);
  // Read the block back explicitly from the render target (safer than raw
  // gl.readPixels, which depends on which framebuffer happens to be bound).
  renderer.readRenderTargetPixels(target, 0, 0, WIDTH, HEIGHT, pixels);
  // Decode: each pixel packs one stereo frame as two 16-bit values
  // (low byte + 256 * high byte per channel), mapped from [0, 65535] to [-1, 1].
  // The final block may extend past the buffer end; stop at totalSamples.
  const blockLen = Math.min(samples, totalSamples - i * samples);
  for (let j = 0; j < blockLen; j++) {
    outputDataL[i * samples + j] = ((pixels[j * 4 + 0] + 256 * pixels[j * 4 + 1]) / 65535) * 2 - 1;
    outputDataR[i * samples + j] = ((pixels[j * 4 + 2] + 256 * pixels[j * 4 + 3]) / 65535) * 2 - 1;
  }
}
// Hand the rendered buffer to the source node and start looped playback.
// NOTE(review): modern browsers suspend an AudioContext created without a user
// gesture; playback may need ctx.resume() from a click handler — confirm.
node.buffer = audioBuffer;
node.start(0);
// CodePen export artifacts (page chrome, not code) — commented out so the
// script parses:
// View Compiled
// This Pen doesn't use any external CSS resources.