HTML preprocessors can make writing HTML more powerful or convenient. For instance, Markdown is designed to be easier to write and read for text documents and you could write a loop in Pug.
In CodePen, whatever you write in the HTML editor is what goes within the <body>
tags in a basic HTML5 template. So you don't have access to higher-up elements like the <html>
tag. If you want to add classes there that can affect the whole document, this is the place to do it.
In CodePen, whatever you write in the HTML editor is what goes within the <body>
tags in a basic HTML5 template. If you need things in the <head>
of the document, put that code here.
The resource you are linking to is using the 'http' protocol, which may not work when the browser is using https.
CSS preprocessors help make authoring CSS easier. All of them offer things like variables and mixins to provide convenient abstractions.
It's a common practice to apply CSS to a page that styles elements such that they are consistent across all browsers. We offer two of the most popular choices: normalize.css and a reset. Or, choose Neither and nothing will be applied.
To get the best cross-browser support, it is a common practice to apply vendor prefixes to CSS properties and values that require them to work. For instance -webkit-
or -moz-
.
We offer two popular choices: Autoprefixer (which processes your CSS server-side) and -prefix-free (which applies prefixes via a script, client-side).
Any URLs added here will be added as <link>
s in order, and before the CSS in the editor. You can use the CSS from another Pen by using its URL and the proper URL extension.
You can apply CSS to your Pen from any stylesheet on the web. Just put a URL to it here and we'll apply it, in the order you have them, before the CSS in the Pen itself.
You can also link to another Pen here (use the .css
URL Extension) and we'll pull the CSS from that Pen and include it. If it's using a matching preprocessor, use the appropriate URL Extension and we'll combine the code before preprocessing, so you can use the linked Pen as a true dependency.
JavaScript preprocessors can help make authoring JavaScript easier and more convenient.
Babel includes JSX processing.
Any URLs added here will be added as <script>
s in order, and run before the JavaScript in the editor. You can use the URL of any other Pen and it will include the JavaScript from that Pen.
You can apply a script from anywhere on the web to your Pen. Just put a URL to it here and we'll add it, in the order you have them, before the JavaScript in the Pen itself.
If the script you link to has the file extension of a preprocessor, we'll attempt to process it before applying.
You can also link to another Pen here, and we'll pull the JavaScript from that Pen and include it. If it's using a matching preprocessor, we'll combine the code before preprocessing, so you can use the linked Pen as a true dependency.
Search for and use JavaScript packages from npm here. By selecting a package, an import
statement will be added to the top of the JavaScript editor for this package.
Using packages here is powered by esm.sh, which makes packages from npm not only available on a CDN, but prepares them for native JavaScript ESM usage.
All packages are different, so refer to their docs for how they work.
If you're using React / ReactDOM, make sure to turn on Babel for the JSX processing.
If active, Pens will autosave every 30 seconds after being saved once.
If enabled, the preview panel updates automatically as you code. If disabled, use the "Run" button to update.
If enabled, your code will be formatted when you actively save your Pen. Note: your code becomes un-folded during formatting.
Visit your global Editor Settings.
<!-- Audio-controlled variable font demo: the headline's variation axes react to mic input. -->
<div class="audio-variable-font">
<!-- The text whose wdth/wght/hght axes are driven by the CSS custom properties. -->
<div class="font"><span>OUT LOUD!</span></div>
<!-- Mic column: level-meter canvas behind a mic icon and a start/stop toggle button. -->
<div id="micro" class="micro">
<div class="micro-wrapper">
<div class="dtc">
<!-- JS draws the volume bar here and resizes it with the column. -->
<canvas id="meter"></canvas>
<img src="https://www.kobufoundry.com/files/themes/foundry_theme/shortcodes/05_audio_controlled_variable_font/images/mic.svg" alt="">
<button type="button">start the mic</button>
</div>
</div>
</div>
</div>
// Palette
$white: #ffffff;
$black: #000000;

// Variation-axis values; JS tweens these with gsap while the mic is live.
:root {
  --weight: 0;
  --width: 0;
  --height: 0;
}

* {
  box-sizing: border-box;
}

body {
  padding: 50px;
  margin: 0;
  background: $black;
  font-size: 18px;
  font-family: Helvetica, Tahoma, sans-serif;
  font-weight: 300;
  color: $white;
  line-height: 1.44;
}

.audio-variable-font {
  border: 1px solid #fff;
  margin-bottom: 30px;
  overflow: hidden;
  display: flex;
  flex-wrap: wrap;
  align-items: stretch;
  flex-direction: row-reverse;

  // NOTE(review): the floats + clearfix below look like a pre-flexbox
  // fallback; they are inert next to `display: flex` but are kept to avoid
  // any layout change — confirm before removing.
  & > div, & > li {
    float: left;
  }
  &:after {
    clear: both;
    content: " ";
    display: block;
    height: 0;
    line-height: 0;
    visibility: hidden;
  }

  // Mic column (fixed width; the font panel takes the rest).
  .micro {
    border-right: 1px solid #fff;
    width: 175px;
    padding: 20px;
    position: relative;
    text-align: center;

    // Table layout to vertically center the icon/button stack.
    .micro-wrapper {
      display: table;
      height: 100%;
      .dtc {
        display: table-cell;
        vertical-align: middle;
      }
    }

    img {
      display: inline-block;
      width: 60px;
      height: 87px;
      position: relative;
      z-index: 2; // keep the icon above the meter canvas
    }

    button {
      transition: color .5s ease-in-out, background-color .5s ease-in-out;
      display: inline-block;
      color: #fff;
      font-size: .77rem;
      text-align: center;
      margin-top: 20px;
      border: 1px solid #fff;
      border-radius: 18px;
      padding: 7px 15px;
      position: relative;
      z-index: 2; // above the meter canvas
      background-color: transparent;
      cursor: pointer;
      &:focus {
        outline: none;
      }
      &:hover {
        color: #000;
        background-color: #fff;
      }
    }

    // Volume bar canvas, drawn behind the icon/button, growing upward.
    canvas {
      position: absolute;
      bottom: 0;
      left: 0;
      width: 100%;
      z-index: 1;
    }
  }

  // The reactive headline.
  .font {
    width: calc( 100% - 175px);
    align-self: center;
    padding: 20px 30px;
    font-size: 6rem;
    font-family: 'Rakki', sans-serif; // fixed: stray double semicolon
    font-variation-settings: 'wdth' var(--width), 'wght' var(--weight), 'hght' var(--height);
    white-space: nowrap;
    line-height: 1;
    span {
      display: block;
      margin-top: -15px;
    }
  }
}
// Shared audio-pipeline state, set up and torn down by the functions below.
var audioContext = null;      // AudioContext, created when the mic starts
var meter = null;             // volume-meter node (see createAudioMeter)
var analyser = null;          // AnalyserNode used for pitch detection
var buf = null;               // Float32Array reused for time-domain samples
var mediaStream = null;       // granted microphone MediaStream
var mediaStreamSource = null; // source node wrapping the stream
var canvas = document.getElementById('meter');
var canvasContext = canvas.getContext('2d');
var micro = document.getElementById('micro');
var openAudioContext = 0;     // 1 while the mic/context is live
var MIN_SAMPLES = 0;          // lower bound for the autocorrelation offset search

// Level bar drawn on the canvas behind the mic icon; gsap tweens `height`.
var bar = {
  x: 0,
  y: 0,
  width: micro.clientWidth,
  height: 0,
  fill: "#282828"
};
/*
The MIT License (MIT)
Copyright (c) 2014 Chris Wilson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
/**
 * onaudioprocess handler for the meter's ScriptProcessorNode.
 * Computes the RMS level of the current input buffer and stores it on the
 * node as `volume`; flags clipping when any sample reaches `clipLevel`.
 * @param {AudioProcessingEvent} event - `event.target` is the processor node.
 */
function volumeAudioProcess(event) {
  var node = event.target;
  var samples = event.inputBuffer.getChannelData(0);
  var squares = 0;

  for (var i = 0; i < samples.length; i++) {
    var sample = samples[i];
    // Remember when clipping happened so checkClipping() can let the
    // indicator linger for `clipLag` milliseconds.
    if (Math.abs(sample) >= node.clipLevel) {
      node.clipping = true;
      node.lastClip = window.performance.now();
    }
    squares += sample * sample;
  }

  // Root-mean-square of the buffer. The classic smoothing
  // (max of rms and volume * averaging) is intentionally skipped here;
  // gsap tweening provides the "slow release" instead.
  node.volume = Math.sqrt(squares / samples.length);
}
/*
Usage:
audioNode = createAudioMeter(audioContext,clipLevel,averaging,clipLag);
audioContext: the AudioContext you're using.
clipLevel: the level (0 to 1) that you would consider "clipping".
Defaults to 0.98.
averaging: how "smoothed" you would like the meter to be over time.
Should be between 0 and less than 1. Defaults to 0.95.
clipLag: how long you would like the "clipping" indicator to show
after clipping has occurred, in milliseconds. Defaults to 750ms.
Access the clipping through node.checkClipping(); use node.shutdown to get rid of it.
*/
/**
 * Builds a ScriptProcessorNode that measures input volume via
 * volumeAudioProcess.
 * @param {AudioContext} audioContext - the context to create the node in.
 * @param {number} [clipLevel=0.98] - sample level (0..1) that counts as clipping.
 * @param {number} [averaging=0.95] - smoothing factor (kept for API parity).
 * @param {number} [clipLag=750] - ms the clipping indicator stays on.
 * @returns {ScriptProcessorNode} node with volume/clipping state and
 *   checkClipping()/shutdown() helpers attached.
 */
function createAudioMeter(audioContext, clipLevel, averaging, clipLag) {
  var node = audioContext.createScriptProcessor(512);
  node.onaudioprocess = volumeAudioProcess;
  node.clipping = false;
  node.lastClip = 0;
  node.volume = 0;
  node.clipLevel = clipLevel || 0.98;
  node.averaging = averaging || 0.95;
  node.clipLag = clipLag || 750;

  // No audio is actually routed through (input isn't copied to output),
  // but connecting keeps Chrome pulling data — works around a Chrome bug.
  node.connect(audioContext.destination);

  // True while clipping; auto-clears once `clipLag` ms have passed.
  node.checkClipping = function () {
    if (!this.clipping) {
      return false;
    }
    if (this.lastClip + this.clipLag < window.performance.now()) {
      this.clipping = false;
    }
    return this.clipping;
  };

  node.shutdown = function () {
    this.disconnect();
    this.onaudioprocess = null;
  };

  return node;
}
/**
 * Creates the AnalyserNode used for pitch detection and (re)allocates the
 * shared `buf` Float32Array to hold its time-domain samples.
 * @param {AudioContext} audioContext
 * @returns {AnalyserNode} analyser with an added shutdown() helper.
 */
function createAnalyser(audioContext) {
  var node = audioContext.createAnalyser();
  node.fftSize = 2048;
  // Size the shared sample buffer to match this analyser's output.
  buf = new Float32Array(node.frequencyBinCount);
  node.shutdown = function () {
    this.disconnect();
  };
  return node;
}
/**
 * Estimates the fundamental frequency of a time-domain sample buffer using
 * normalized average-difference autocorrelation (from Chris Wilson's
 * pitch-detect demo, MIT licensed — see header above).
 * @param {Float32Array} buf - time-domain samples.
 * @param {number} sampleRate - samples per second.
 * @returns {number} detected frequency in Hz, or -1 when the signal is too
 *   quiet or no usable correlation was found.
 */
function autoCorrelate(buf, sampleRate) {
  // Correlation threshold that counts as a confident pitch match.
  var GOOD_ENOUGH_CORRELATION = 0.9;
  var size = buf.length;
  var maxSamples = Math.floor(size / 2);
  var bestOffset = -1;
  var bestCorrelation = 0;
  var foundGoodCorrelation = false;
  var correlations = new Array(maxSamples);

  // Bail out early when the signal is too quiet to analyse.
  var rms = 0;
  for (var i = 0; i < size; i++) {
    rms += buf[i] * buf[i];
  }
  rms = Math.sqrt(rms / size);
  if (rms < 0.01) {
    return -1;
  }

  var lastCorrelation = 1;
  for (var offset = MIN_SAMPLES; offset < maxSamples; offset++) {
    // Normalized similarity between the buffer and itself shifted by
    // `offset`: 1 means identical, lower means more different.
    var correlation = 0;
    for (var j = 0; j < maxSamples; j++) {
      correlation += Math.abs(buf[j] - buf[j + offset]);
    }
    correlation = 1 - correlation / maxSamples;
    correlations[offset] = correlation; // kept for the interpolation below

    if (correlation > GOOD_ENOUGH_CORRELATION && correlation > lastCorrelation) {
      foundGoodCorrelation = true;
      if (correlation > bestCorrelation) {
        bestCorrelation = correlation;
        bestOffset = offset;
      }
    } else if (foundGoodCorrelation) {
      // Correlation just dropped after a good run, so bestOffset holds the
      // first (fundamental) peak. Nudge it by interpolating between the
      // neighbouring correlations to approximate the true peak position
      // (acknowledged as HACKY by the original author).
      // bestOffset >= 1 here: foundGoodCorrelation can only become true from
      // offset 1 onward, and this branch runs on a later pass.
      var shift =
        (correlations[bestOffset + 1] - correlations[bestOffset - 1]) /
        correlations[bestOffset];
      return sampleRate / (bestOffset + 8 * shift);
    }
    lastCorrelation = correlation;
  }

  // No clean drop-off was seen; fall back to the best peak if any.
  if (bestCorrelation > 0.01) {
    return sampleRate / bestOffset;
  }
  return -1;
}
/**
 * Per-tick render callback (registered on gsap.ticker while the mic runs).
 * Maps the detected pitch plus current volume onto the font's variation
 * axes, and draws the volume bar on the meter canvas.
 */
function draw() {
  // Keep an axis value inside [min, max].
  function clamp(value, min, max) {
    if (value > max) return max;
    if (value < min) return min;
    return value;
  }

  // Detect the current pitch from the analyser's time-domain data.
  analyser.getFloatTimeDomainData(buf);
  var frequency = Math.round(autoCorrelate(buf, audioContext.sampleRate));
  var pitchDetected = frequency !== -1;

  var widthVol = 0;
  var weightVol = 0;
  var heightVol = 0;

  // Low pitch drives the width axis (volume scaled by max * 5, clamped).
  if (pitchDetected && frequency < 220) {
    widthVol = clamp(meter.volume * (500 * 5), 100, 500);
  }

  // High pitch drives the height axis; any other detected pitch drives
  // the weight axis (a low pitch therefore also sets weight).
  if (frequency > 1000) {
    heightVol = clamp(meter.volume * (900 * 5), 100, 900);
  } else if (pitchDetected) {
    weightVol = clamp(meter.volume * (900 * 5), 100, 900);
  }

  // Tween the CSS custom properties so the font reacts smoothly.
  gsap.to(":root", 1, {
    "--width": widthVol,
    "--height": heightVol,
    "--weight": weightVol,
    ease: Expo.easeOut
  });

  var meterWidth = micro.clientWidth;
  var meterHeight = micro.clientHeight;

  // Redraw the volume bar from scratch each frame.
  canvasContext.clearRect(0, 0, meterWidth, meterHeight);
  canvasContext.fillStyle = bar.fill;

  // Tween the bar height for a smooth rise and fall.
  gsap.to(bar, 0.8, {
    height: meter.volume * meterHeight,
    ease: Expo.easeOut
  });
  canvasContext.fillRect(0, meterHeight - bar.height * 2, meterWidth, bar.height * 2);
}
/**
 * getUserMedia success path: builds the metering graph
 * (source -> meter, source -> analyser -> meter) and starts the per-frame
 * draw loop on gsap's ticker.
 * @param {MediaStream} stream - the granted microphone stream.
 */
function gotStream(stream) {
// Create an AudioNode from the stream.
mediaStreamSource = audioContext.createMediaStreamSource(stream);
// Create a new volume meter and connect it.
meter = createAudioMeter(audioContext);
mediaStreamSource.connect(meter);
analyser = createAnalyser(audioContext);
mediaStreamSource.connect(analyser);
// NOTE(review): routing the analyser output into the meter gives the
// meter a second input — presumably just to keep the graph pulled;
// confirm this is intentional before changing the wiring.
analyser.connect(meter);
// kick off the visual updating
gsap.ticker.add(draw);
}
/**
 * Stops the microphone and tears the audio graph down: stops the
 * MediaStream tracks, disconnects the nodes, stops the draw loop, closes
 * the AudioContext, then animates the meter and font back to rest and
 * resets the button. Safe to call when nothing is running.
 */
function closeAudioContext() {
  if (!audioContext) {
    return;
  }
  // Stop MediaStream tracks. Guarded: if getUserMedia was denied the
  // context exists but the stream was never granted, and the original
  // unguarded mediaStream.getTracks() would throw.
  if (mediaStream) {
    mediaStream.getTracks().forEach(function (track) {
      track.stop();
    });
    mediaStream = null;
  }
  if (mediaStreamSource) {
    mediaStreamSource.disconnect();
  }
  if (meter) {
    meter.shutdown();
  }
  if (analyser) {
    analyser.shutdown();
  }
  gsap.ticker.remove(draw);
  // Close the AudioContext, then reset the visuals and the button.
  audioContext.close().then(function () {
    var WIDTH = micro ? micro.clientWidth : 0,
        HEIGHT = micro ? micro.clientHeight : 0;
    // clear the background
    canvasContext.clearRect(0, 0, WIDTH, HEIGHT);
    gsap.to(bar, 0.8, {
      height: 0,
      ease: Expo.easeOut
    });
    // Ease the variation axes back to their resting values.
    gsap.to(":root", 1, {
      "--weight": 0,
      "--width": 0,
      "--height": 0,
      ease: Expo.easeOut
    });
    var button = document.querySelector('.micro button');
    button.classList.remove('active');
    button.innerHTML = 'start the mic';
    audioContext = null;
    openAudioContext = 0;
  }).catch(function (err) {
    console.log(err.name + ": " + err.message);
  });
}
/**
 * Starts the microphone: creates an AudioContext, requests audio-only
 * getUserMedia access, and on success wires the metering graph (gotStream)
 * and flips the button into its "stop" state. Errors are logged.
 */
function initAudioContext() {
  // Older Safari exposes only the webkit-prefixed constructor.
  window.AudioContext = window.AudioContext || window.webkitAudioContext;
  audioContext = new AudioContext();

  var constraints = { audio: true, video: false };
  navigator.mediaDevices
    .getUserMedia(constraints)
    .then(function (stream) {
      mediaStream = stream;
      gotStream(stream);
      // Reflect the live state in the UI.
      var button = document.querySelector('.micro button');
      button.classList.add('active');
      button.innerHTML = 'stop the mic';
      openAudioContext = 1;
    })
    .catch(function (err) {
      console.log(err.name + ": " + err.message);
    });
}
/**
 * Keeps the meter canvas's drawing buffer in sync with the rendered size
 * of the mic column so the volume bar draws at 1:1 pixels.
 */
function resizeCanvas() {
  var column = micro;
  canvas.width = column.clientWidth;
  canvas.height = column.clientHeight;
}
/**
 * Click handler for the mic button: toggles the audio pipeline based on
 * the button's `active` class (set/cleared by init/closeAudioContext).
 * Invoked with `this` bound to the button element.
 */
function buttonListener() {
  var isRunning = this.classList.contains("active");
  if (isRunning) {
    closeAudioContext();
  } else {
    initAudioContext();
  }
}
// Wire up the toggle button, size the canvas once, and keep it sized.
var micButton = document.querySelector('.micro button');
micButton.addEventListener('click', buttonListener, false);
resizeCanvas();
window.addEventListener('resize', resizeCanvas, false);
Also see: Tab Triggers