Sampled Auditory Content
An Introduction with Examples in Java
Prof. David Bernstein
Computer Science Department
bernstdh@jmu.edu
Temporal Sampling

A continuous signal is measured (i.e., sampled) at discrete, evenly spaced points in time; the number of samples per second is the sampling rate.
Quantization

Each sample is then represented using a finite number of bits, so its amplitude must be mapped to one of a finite set of discrete levels.
AudioFormat

An AudioFormat object contains the number of channels (e.g., mono, stereo),
the sampling rate, the quantization (i.e., the number
of bits per sample), and the encoding technique (e.g.,
linear pulse code modulation, nonlinear mu-law).
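For example, CD-quality audio could be described as follows. (An illustrative snippet; the parameter values are assumptions, not taken from these notes.)

    import javax.sound.sampled.AudioFormat;

    // Stereo, 44100 samples/sec, 16 bits/sample, signed linear PCM;
    // each frame holds one sample per channel: 2 channels * 2 bytes = 4 bytes
    AudioFormat cdQuality = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                                            44100.0f, // sample rate (Hz)
                                            16,       // sample size (bits)
                                            2,        // channels
                                            4,        // frame size (bytes)
                                            44100.0f, // frame rate (Hz)
                                            true);    // big-endian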
Conceptual Model of the Presentation of Sampled Audio

import java.io.*;
import javax.sound.sampled.*;

import io.ResourceFinder;

/**
 * A simple application that can be used to present
 * sampled auditory content that is stored in a file
 *
 * @author  Prof. David Bernstein, James Madison University
 * @version 1.0
 */
public class ClipPlayer
{
    /**
     * The entry point of the application
     *
     * args[0] should contain the file to load and present
     *
     * @param args The command-line arguments
     */
    public static void main(String[] args) throws Exception
    {
       AudioFormat        format;
       AudioInputStream   stream;
       Clip               clip;
       DataLine.Info      info;
       InputStream        is;
       ResourceFinder     finder;

       if ((args != null) && (args.length > 0) && (!args[0].equals("")))
       {
          // Get the resource
          finder = ResourceFinder.createInstance();
          is     = finder.findInputStream("/"+args[0]);

          // Create an AudioInputStream from the InputStream
          stream = AudioSystem.getAudioInputStream(is);

          // Get the AudioFormat for the file
          format = stream.getFormat();

          // Create an object that contains all relevant
          // information about a DataLine for this AudioFormat
          info = new DataLine.Info(Clip.class, format);

          // Create a Clip (i.e., a pre-loaded Line)
          clip = (Clip)AudioSystem.getLine(info);

          // Tell the Clip to acquire any required system
          // resources and become operational
          clip.open(stream);

          // Present the Clip (without blocking the
          // thread of execution)
          clip.start();

          System.out.println("Press [Enter] to exit...");
          System.in.read();
       }
       else
       {
          System.out.println("You forgot the file name");
       }
    }
}
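Assuming the compiled class and a (hypothetical) file named beep.wav are both on the class path, running java ClipPlayer beep.wav loads the file, starts presenting it, and exits when [Enter] is pressed.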
The BufferedSound Class
package auditory.sampled;

import java.util.*;
import javax.sound.sampled.*;

/**
 * An in-memory representation of sampled auditory content.
 * Because this is a complete in-memory representation it often uses
 * a lot of memory. One could, alternatively, keep part of the
 * content in-memory and store the remainder in a file (e.g., using
 * a ring buffer).
 *
 * An individual BufferedSound can only be manipulated by one thread
 * at a time. This should not be a problem in practice since, most
 * often, a BufferedSound will be manipulated first and then rendered.
 *
 * Note: For simplicity, all BufferedSound objects use signed PCM with
 * a 16-bit sample size, and a big-endian byte order (i.e., network
 * byte order)
 *
 * @author  Prof. David Bernstein, James Madison University
 * @version 1.0
 */
public class BufferedSound implements Content
{
    private ArrayList<double[]>   channels;
    private AudioFormat           format;
    private int                   numberOfSamples;

    private static final double   MAX_AMPLITUDE       = 32767.0;
    private static final double   MIN_AMPLITUDE       = -32767.0;
    private static final int      SAMPLE_SIZE_IN_BITS = 16;
    private static final int      BYTES_PER_CHANNEL   = SAMPLE_SIZE_IN_BITS/8;

    /**
     * Explicit Value Constructor
     *
     * @param sampleRate The sampling rate (in Hz)
     */
    public BufferedSound(float sampleRate)
    {
       format = new AudioFormat(
                    AudioFormat.Encoding.PCM_SIGNED,
                    sampleRate,          // Sample rate in Hz
                    SAMPLE_SIZE_IN_BITS, // Sample size in bits
                    0,                   // Number of channels
                    0,                   // Frame size in bytes
                    sampleRate,          // Frame rate in Hz
                    true);               // Big-endian or not

       channels        = new ArrayList<double[]>();
       numberOfSamples = 0;
    }

    /**
     * Add a channel to this BufferedSound
     *
     * One channel corresponds to "mono", two channels corresponds
     * to "stereo", etc...
     *
     * @param signal The samples for the new channel
     */
    public synchronized void addChannel(double[] signal)
    {
       if (numberOfSamples == 0) numberOfSamples = signal.length;

       if (numberOfSamples == signal.length)
       {
          channels.add(signal);
          updateAudioFormat();
       }
    }

    /**
     * Append a BufferedSound to this BufferedSound
     *
     * Note: If the BufferedSound to append does not match
     * this BufferedSound then nothing is done
     *
     * @param other The BufferedSound to append
     */
    public synchronized void append(BufferedSound other)
    {
       ArrayList<double[]>   temp;
       double[]              otherSignal, tempSignal, thisSignal;
       Iterator<double[]>    i, j;

       if (matches(other))
       {
          temp = new ArrayList<double[]>();

          i = channels.iterator();
          j = other.channels.iterator();
          while (i.hasNext())
          {
             thisSignal  = i.next();
             otherSignal = j.next();

             // Allocate space for the new signal
             tempSignal = new double[thisSignal.length + otherSignal.length];

             // Copy the current signal
             System.arraycopy(thisSignal, 0,
                              tempSignal, 0, thisSignal.length);

             // Append the other signal
             System.arraycopy(otherSignal, 0,
                              tempSignal, thisSignal.length,
                              otherSignal.length);

             // Save the longer signal
             temp.add(tempSignal);
          }
          channels = temp;
       }
    }

    /**
     * Get the AudioFormat for this BufferedSound
     *
     * @return The AudioFormat
     */
    public synchronized AudioFormat getAudioFormat()
    {
       return format;
    }

    /**
     * Get the signals
     *
     * Note: It is dangerous to provide access to the
     * signal data since it could be modified in
     * inappropriate ways
     *
     * @return An Iterator over the signals (one per channel)
     */
    public synchronized Iterator<double[]> getSignals()
    {
       return channels.iterator();
    }

    /**
     * Get the length of this BufferedSound in microseconds
     *
     * @return The length in microseconds
     */
    public synchronized int getMicrosecondLength()
    {
       return (int)(getNumberOfSamples() / getSampleRate() * 1000000.0);
    }

    /**
     * Get the length of this BufferedSound in milliseconds
     *
     * @return The length in milliseconds
     */
    public synchronized int getMillisecondLength()
    {
       return getMicrosecondLength()/1000;
    }

    /**
     * Get the number of channels
     *
     * @return The number of channels
     */
    public synchronized int getNumberOfChannels()
    {
       return channels.size();
    }

    /**
     * Get the number of samples (per channel) in this BufferedSound
     *
     * @return The number of samples
     */
    public synchronized int getNumberOfSamples()
    {
       return numberOfSamples;
    }

    /**
     * Get the sampling rate for this BufferedSound
     *
     * @return The sampling rate (in Hz)
     */
    public synchronized float getSampleRate()
    {
       return format.getSampleRate();
    }

    /**
     * Compares this BufferedSound object to another
     *
     * @param other The BufferedSound to compare to
     * @return true if the two match; false otherwise
     */
    public synchronized boolean matches(BufferedSound other)
    {
       return getAudioFormat().matches(other.getAudioFormat()) &&
              (getNumberOfSamples() == other.getNumberOfSamples());
    }

    /**
     * Render this BufferedSound on the given Clip
     *
     * @param clip The Clip to use
     */
    public synchronized void render(Clip clip)
                        throws LineUnavailableException
    {
       byte[]               rawBytes;
       double[]             signal;
       int                  channel, frameSize, length, offset, size;
       Iterator<double[]>   iterator;
       short                scaled;

       size      = channels.size();
       length    = getNumberOfSamples();
       frameSize = format.getFrameSize();

       // bytes = samples/channel * bytes/channel * channels
       rawBytes = new byte[length * BYTES_PER_CHANNEL * size];

       channel  = 0;
       iterator = channels.iterator();
       while (iterator.hasNext())
       {
          signal = iterator.next();
          offset = channel * BYTES_PER_CHANNEL;

          for (int i=0; i<length; i++)
          {
             scaled = scaleSample(signal[i]);

             // Big-endian
             rawBytes[frameSize*i+offset]   = (byte)(scaled >> 8);
             rawBytes[frameSize*i+offset+1] = (byte)(scaled & 0xff);

             // Little-endian
             // rawBytes[frameSize*i+offset+1] = (byte)(scaled >> 8);
             // rawBytes[frameSize*i+offset]   = (byte)(scaled & 0xff);
          }
          ++channel;
       }

       // Throws LineUnavailableException
       clip.open(format, rawBytes, 0, rawBytes.length);

       // Start the Clip
       clip.start();
    }

    /**
     * Scale a sample so that it fits in a signed short
     * (i.e., two bytes)
     *
     * @param sample The sample to scale
     * @return       The scaled sample
     */
    private short scaleSample(double sample)
    {
       short scaled;

       if      (sample > MAX_AMPLITUDE) scaled = (short)MAX_AMPLITUDE;
       else if (sample < MIN_AMPLITUDE) scaled = (short)MIN_AMPLITUDE;
       else                             scaled = (short)sample;

       return scaled;
    }

    /**
     * Update the AudioFormat (usually after a channel is added)
     */
    private void updateAudioFormat()
    {
       format = new AudioFormat(
                    format.getEncoding(),              // Encoding
                    format.getSampleRate(),            // Sample rate in Hz
                    format.getSampleSizeInBits(),      // Sample size in bits
                    channels.size(),                   // Number of channels
                    channels.size()*BYTES_PER_CHANNEL, // Frame size in bytes
                    format.getSampleRate(),            // Frame rate in Hz
                    format.isBigEndian());             // Big-endian or not
    }
}
Creating BufferedSound Objects: A 100Hz Sine Wave
/**
 * Create a BufferedSound from a sine wave with a
 * particular frequency
 *
 * The length of the sound is measured in microseconds
 * to be consistent with the Clip interface
 *
 * @param frequency  The frequency of the wave (in Hz)
 * @param length     The length of the sound (in microseconds)
 * @param sampleRate The number of samples per second
 * @param amplitude  The maximum amplitude of the wave in [0.0, 32767.0]
 */
public static BufferedSound createBufferedSound(double frequency,
                                                int length,
                                                float sampleRate,
                                                double amplitude)
{
    BufferedSound   sound;
    double          radians, radiansPerSample;
    double[]        signal;
    int             n;

    // samples = samples/sec * sec
    n      = (int)(sampleRate * (double)length/1000000.0);
    signal = new double[n];

    // rads/sample = (rads/cycle * cycles/sec) / (samples/sec)
    radiansPerSample = (Math.PI*2.0 * frequency) / sampleRate;

    for (int i=0; i<signal.length; i++)
    {
       // rad = rad/sample * sample
       radians   = radiansPerSample * i;
       signal[i] = amplitude * Math.sin(radians);
    }

    sound = new BufferedSound(sampleRate);
    sound.addChannel(signal);

    return sound;
}
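For instance, the 100Hz tone in the title might be created as follows. (A sketch; the sample rate and amplitude are illustrative values, and the factory method is assumed to be a static method of the BufferedSoundFactory class used later in these notes.)

    BufferedSound sine;

    // 100Hz for 1,000,000 microseconds at 8000 samples/sec,
    // with a peak amplitude of 16000 (roughly half of full scale)
    sine = BufferedSoundFactory.createBufferedSound(100.0, 1000000,
                                                    8000.0f, 16000.0);

A BufferedSound can also be created from an audio file. The first step is to convert mu-law/A-law input to linear PCM: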
inFormat = inStream.getFormat();

// Convert ULAW and ALAW to PCM (the 8-bit logarithmic samples
// expand to 16-bit linear samples, hence the doubling)
if ((inFormat.getEncoding() == AudioFormat.Encoding.ULAW) ||
    (inFormat.getEncoding() == AudioFormat.Encoding.ALAW))
{
   pcmFormat = new AudioFormat(
                   AudioFormat.Encoding.PCM_SIGNED,
                   inFormat.getSampleRate(),
                   inFormat.getSampleSizeInBits()*2,
                   inFormat.getChannels(),
                   inFormat.getFrameSize()*2,
                   inFormat.getFrameRate(),
                   true);

   pcmStream = AudioSystem.getAudioInputStream(pcmFormat, inStream);
}
else // It is PCM
{
   pcmFormat = inFormat;
   pcmStream = inStream;
}
// Create a buffer and read the raw bytes
bufferSize = (int)(pcmStream.getFrameLength()) * pcmFormat.getFrameSize();
rawBytes   = new byte[bufferSize];
pcmStream.read(rawBytes);
// Convert the raw bytes
if (pcmFormat.getSampleSizeInBits() == 8)
{
   signal = processEightBitQuantization(rawBytes, pcmFormat);
}
else
{
   signal = processSixteenBitQuantization(rawBytes, pcmFormat);
}
/**
 * Convert the raw bytes for 8-bit samples
 *
 * @param rawBytes The array of raw bytes
 * @param format   The AudioFormat
 * @return         The signal (one int per sample)
 */
private static int[] processEightBitQuantization(byte[] rawBytes,
                                                 AudioFormat format)
{
    int[]    signal;
    String   encoding;

    signal   = new int[rawBytes.length];
    encoding = format.getEncoding().toString();

    if (encoding.startsWith("PCM_SIGN"))
    {
       for (int i=0; i<rawBytes.length; i++) signal[i] = rawBytes[i];
    }
    else
    {
       // Unsigned samples are stored in [0, 255]; re-center them
       for (int i=0; i<rawBytes.length; i++) signal[i] = rawBytes[i]-128;
    }

    return signal;
}
sound = new BufferedSound(pcmFormat.getSampleRate());

// Process the individual channels
if (pcmFormat.getChannels() == 1) // Mono
{
   sampleLength = signal.length;
   monoSignal   = new double[sampleLength];

   for (int i=0; i<sampleLength; i++)
   {
      monoSignal[i] = signal[i]; // Convert to double
   }

   sound.addChannel(monoSignal);
}
else // Stereo
{
   sampleLength = signal.length/2;
   leftSignal   = new double[sampleLength];
   rightSignal  = new double[sampleLength];

   // The frames are interleaved: a left sample, then a right sample
   for (int i=0; i<sampleLength; i++)
   {
      leftSignal[i]  = signal[2*i];
      rightSignal[i] = signal[2*i+1];
   }

   sound.addChannel(leftSignal);
   sound.addChannel(rightSignal);
}
package auditory.sampled;

/**
 * The requirements of all unary operations that
 * can be performed on BufferedSound objects
 *
 * @author  Prof. David Bernstein, James Madison University
 * @version 1.0
 */
public interface BufferedSoundUnaryOp
{
    /**
     * Performs a single-input/single-output operation on a
     * BufferedSound. If the destination is null, a BufferedSound with
     * an appropriate AudioFormat and length is created and returned.
     *
     * @param src  The operand (i.e., sound to operate on)
     * @param dest An empty sound to hold the result (or null)
     * @return     The filtered sound
     * @throws IllegalArgumentException if the sounds don't match
     */
    public BufferedSound filter(BufferedSound src, BufferedSound dest);
}
package auditory.sampled;

/**
 * The requirements of all binary operations that
 * can be performed on BufferedSound objects
 *
 * @author  Prof. David Bernstein, James Madison University
 * @version 1.0
 */
public interface BufferedSoundBinaryOp
{
    /**
     * Performs a dual-input/single-output operation on a
     * BufferedSound. If the destination is null,
     * a BufferedSound with an appropriate AudioFormat and length
     * is created and returned.
     *
     * @param src1 One operand (i.e., one sound to operate on)
     * @param src2 The other operand (i.e., other sound to operate on)
     * @param dest An empty sound to hold the result (or null)
     * @return     The filtered sound
     * @throws IllegalArgumentException if the sounds don't match
     */
    public BufferedSound filter(BufferedSound src1, BufferedSound src2,
                                BufferedSound dest)
                         throws IllegalArgumentException;
}
package auditory.sampled;

/**
 * An abstract class containing functionality that is common to
 * operations on BufferedSound objects. This class can be extended
 * by classes that want to implement, for example, the
 * BufferedSoundUnaryOp and BufferedSoundBinaryOp interfaces.
 *
 * @author  Prof. David Bernstein, James Madison University
 * @version 1.0
 */
public abstract class AbstractBufferedSoundOp
{
    /**
     * Creates a BufferedSound with the same sampling rate and length
     * as the source. All of the samples in the new BufferedSound will
     * be 0.
     *
     * @param src The BufferedSound to mimic
     * @return    The new "empty" BufferedSound
     */
    public BufferedSound createCompatibleDestinationSound(BufferedSound src)
    {
       BufferedSound   temp;
       float           sampleRate;
       int             channels, length;

       channels   = src.getNumberOfChannels();
       length     = src.getNumberOfSamples();
       sampleRate = src.getSampleRate();

       temp = new BufferedSound(sampleRate);
       for (int i=0; i<channels; i++)
       {
          temp.addChannel(new double[length]);
       }

       return temp;
    }

    /**
     * Check to see if two BufferedSound objects are compatible.
     *
     * @throws IllegalArgumentException If they are not compatible
     */
    protected void checkArguments(BufferedSound a, BufferedSound b)
              throws IllegalArgumentException
    {
       if (!a.matches(b))
          throw(new IllegalArgumentException("Argument Mismatch"));
    }
}
package auditory.sampled;

import java.util.*;

/**
 * An abstract class that implements the BufferedSoundUnaryOp
 * interface. This class can be extended by classes that
 * want to implement the BufferedSoundUnaryOp interface.
 *
 * @author  Prof. David Bernstein, James Madison University
 * @version 1.0
 */
public abstract class AbstractBufferedSoundUnaryOp
                extends AbstractBufferedSoundOp
                implements BufferedSoundUnaryOp
{
    /**
     * Apply the filter (sample-by-sample). This method
     * must be implemented by concrete children
     *
     * @param source      The source signal
     * @param destination The destination signal
     */
    public abstract void applyFilter(double[] source,
                                     double[] destination);

    /**
     * Apply the filter to all of the channels
     *
     * @param source      The source signals
     * @param destination The destination signals
     */
    public void applyFilter(Iterator<double[]> source,
                            Iterator<double[]> destination)
    {
       while (source.hasNext())
       {
          applyFilter(source.next(), destination.next());
       }
    }

    /**
     * A one-source/one-destination filter. If the
     * destination is null, a BufferedSound with an appropriate
     * AudioFormat and length is created and returned.
     *
     * @param src  The operand (i.e., the sound to operate on)
     * @param dest An empty sound to hold the result (or null)
     * @return     The filtered sound
     */
    public BufferedSound filter(BufferedSound src, BufferedSound dest)
    {
       Iterator<double[]> source, destination;

       // Construct the destination if necessary
       if (dest == null) dest = createCompatibleDestinationSound(src);

       // Get the source channels
       source = src.getSignals();

       // Get the destination channels
       destination = dest.getSignals();

       // Apply the filter
       applyFilter(source, destination);

       return dest;
    }
}
package auditory.sampled;

import java.util.*;

/**
 * An abstract class that implements the BufferedSoundBinaryOp
 * interface. This class can be extended by classes that
 * want to implement the BufferedSoundBinaryOp interface.
 *
 * @author  Prof. David Bernstein, James Madison University
 * @version 1.0
 */
public abstract class AbstractBufferedSoundBinaryOp
                extends AbstractBufferedSoundOp
                implements BufferedSoundBinaryOp
{
    /**
     * Apply the filter (sample-by-sample). This method
     * must be implemented by concrete children
     *
     * @param source1     The signal from source1
     * @param source2     The signal from source2
     * @param destination The destination signal
     */
    public abstract void applyFilter(double[] source1, double[] source2,
                                     double[] destination);

    /**
     * Apply the filter to all of the channels
     *
     * @param source1     The signals from source1
     * @param source2     The signals from source2
     * @param destination The destination signals
     */
    public void applyFilter(Iterator<double[]> source1,
                            Iterator<double[]> source2,
                            Iterator<double[]> destination)
    {
       while (source1.hasNext())
       {
          applyFilter(source1.next(), source2.next(), destination.next());
       }
    }

    /**
     * A two-source/one-destination filter. If the
     * destination is null, a BufferedSound with an appropriate
     * AudioFormat and length is created and returned.
     *
     * @param src1 One operand (i.e., one sound to operate on)
     * @param src2 The other operand (i.e., other sound to operate on)
     * @param dest An empty sound to hold the result (or null)
     * @return     The filtered sound
     * @throws IllegalArgumentException if the sounds don't match
     */
    public BufferedSound filter(BufferedSound src1, BufferedSound src2,
                                BufferedSound dest)
                         throws IllegalArgumentException
    {
       Iterator<double[]> source1, source2, destination;

       // Check the properties of the two source sounds
       // (checkArguments() is inherited from AbstractBufferedSoundOp)
       checkArguments(src1, src2);

       // Construct the destination if necessary; otherwise check it
       if (dest == null) dest = createCompatibleDestinationSound(src1);
       else              checkArguments(src1, dest);

       // Get the source channels
       source1 = src1.getSignals();
       source2 = src2.getSignals();

       // Get the destination channels
       destination = dest.getSignals();

       // Apply the filter
       applyFilter(source1, source2, destination);

       return dest;
    }
}
package auditory.sampled;

/**
 * A BufferedSoundBinaryOp that adds two (comparable)
 * BufferedSound objects sample-by-sample
 *
 * @author  Prof. David Bernstein, James Madison University
 * @version 1.0
 */
public class AddOp extends AbstractBufferedSoundBinaryOp
{
    /**
     * Adds (sample-by-sample) the two BufferedSound objects.
     *
     * @param source1     The signal in source 1
     * @param source2     The signal in source 2
     * @param destination The resulting signal
     */
    public void applyFilter(double[] source1, double[] source2,
                            double[] destination)
    {
       for (int i=0; i<source1.length; i++)
       {
          destination[i] = source1[i] + source2[i];
       }
    }
}
Harmonics (100Hz + 200Hz)
Beating/Phasing (100Hz + 105Hz)
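Both effects can be produced by adding two sine waves with AddOp. A minimal sketch, assuming the sine-wave factory shown earlier is a static method of BufferedSoundFactory (the sample rate and amplitudes are illustrative):

    BufferedSound          s1, s2, sum;
    BufferedSoundBinaryOp  op;

    // Two 1-second tones; use 200.0 for harmonics or 105.0 for beating
    s1 = BufferedSoundFactory.createBufferedSound(100.0, 1000000,
                                                  8000.0f, 8000.0);
    s2 = BufferedSoundFactory.createBufferedSound(105.0, 1000000,
                                                  8000.0f, 8000.0);

    // The two sounds match (same format and length) so they can be added
    op  = new AddOp();
    sum = op.filter(s1, s2, null);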
package auditory.sampled;

/**
 * A BufferedSoundUnaryOp that reverses a BufferedSound
 *
 * @author  Prof. David Bernstein, James Madison University
 * @version 1.0
 */
public class ReverseOp extends AbstractBufferedSoundUnaryOp
{
    /**
     * Reverse the signal
     *
     * @param source      The source signal
     * @param destination The resulting signal
     */
    public void applyFilter(double[] source, double[] destination)
    {
       int length;

       length = source.length;
       for (int i=0; i<length; i++)
       {
          destination[i] = source[length-1-i];
       }
    }
}
An Infinite, Linear, Causal Filter
\( d_{i} = \sum_{k=0}^{n} s_{i-k} w_{k} + \sum_{j=1}^{m} d_{i-j} v_{j} \)

where the \( s \) are source samples, the \( d \) are destination samples, and the \( w \) and \( v \) are weights. (The feedback sum starts at \( j = 1 \) so that \( d_{i} \) depends only on previously computed outputs.)
A Finite, Linear, Causal Filter (Finite Impulse Response Filter)
\( d_{i} = \sum_{k=0}^{n} s_{i-k} w_{k} \)
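For example, with \( n = 2 \) and weights \( w_{0} = w_{1} = w_{2} = 1/3 \), this is a three-sample moving average, \( d_{i} = (s_{i} + s_{i-1} + s_{i-2})/3 \), which smooths the signal (i.e., acts as a low-pass filter).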
An Illustration
package auditory.sampled;

/**
 * An encapsulation of a Finite Impulse Response (FIR) filter
 *
 * @author  Prof. David Bernstein, James Madison University
 * @version 1.0
 */
public class FIRFilter
{
    private double[] weights;

    /**
     * Explicit Value Constructor
     *
     * @param weights The weights to apply
     */
    public FIRFilter(double[] weights)
    {
       this.weights = new double[weights.length];
       System.arraycopy(weights, 0, this.weights, 0, weights.length);
    }

    /**
     * Get the number of weights (i.e., coefficients) in this
     * FIR filter
     *
     * @return The number of weights
     */
    public int getLength()
    {
       int length;

       length = 0;
       if (weights != null) length = weights.length;

       return length;
    }

    /**
     * Get a particular weight (i.e., coefficient)
     *
     * @param index The index of the weight
     * @return      The weight (or 0.0 for an out-of-range index)
     */
    public double getWeight(int index)
    {
       double weight;

       weight = 0.0;
       if ((weights != null) && (index >= 0) && (index < weights.length))
       {
          weight = weights[index];
       }

       return weight;
    }
}
package auditory.sampled;

/**
 * A BufferedSoundUnaryOp that applies a FIRFilter to a
 * BufferedSound
 *
 * @author  Prof. David Bernstein, James Madison University
 * @version 1.0
 */
public class FIRFilterOp extends AbstractBufferedSoundUnaryOp
{
    private FIRFilter fir;

    /**
     * Explicit Value Constructor
     *
     * @param fir The FIRFilter to use
     */
    public FIRFilterOp(FIRFilter fir)
    {
       this.fir = fir;
    }

    /**
     * Apply a FIRFilter
     *
     * @param source      The source signal
     * @param destination The resulting signal
     */
    public void applyFilter(double[] source, double[] destination)
    {
       double   weight;
       int      length, n;

       n      = fir.getLength();
       length = source.length;

       // Copy the first n-1 samples (there is not yet enough
       // history to filter them)
       for (int i=0; i<n-1; i++)
       {
          destination[i] = source[i];
       }

       // Filter the remaining samples
       for (int i=n-1; i<length; i++)
       {
          destination[i] = 0.0; // In case the destination isn't empty
          for (int k=0; k<n; k++)
          {
             weight          = fir.getWeight(k);
             destination[i] += source[i-k] * weight;
          }
       }
    }
}
BufferedSound          a, sound;
BufferedSoundUnaryOp   op;
double[]               weights;
FIRFilter              firFilter;
int                    n;

a = BufferedSoundFactory.createBufferedSound("/auditory/sampled/"+args[0]);

n       = (int)(a.getNumberOfSamples() / 20.0);
weights = new double[n];

// The weight on the current sample is 1.0
weights[0] = 1.0;

// The weights on the "oldest" 1/4 of the samples
for (int i=3*n/4; i<n; i++) weights[i] = 0.1;

firFilter = new FIRFilter(weights);
op        = new FIRFilterOp(firFilter);

sound = op.filter(a, null);
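With these weights, each output sample is the corresponding input sample plus one tenth of the input from roughly 3n/4 to n samples earlier. Since n is one twentieth of the sound's length, the result is a quiet, delayed copy of the sound layered on top of itself, which is heard as an echo.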
Using the Composite Pattern
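The Content interface plays the role of the component: it declares the operations (such as getAudioFormat() and render()) that clients need, and BufferedSound is a leaf. A composite Content would hold several children and present them as one. A hypothetical sketch (CompositeSound is not part of the original package, and the Content signatures are inferred from the classes above):

package auditory.sampled;

import java.util.*;
import javax.sound.sampled.*;

/**
 * A hypothetical composite Content: it mixes its BufferedSound
 * children (which must all match) using AddOp and renders the
 * result. A sketch only; it assumes at least one child.
 */
public class CompositeSound implements Content
{
    private ArrayList<BufferedSound> children =
        new ArrayList<BufferedSound>();

    public void add(BufferedSound child)
    {
       children.add(child);
    }

    public AudioFormat getAudioFormat()
    {
       // All children match, so use the first child's format
       return children.get(0).getAudioFormat();
    }

    public void render(Clip clip) throws LineUnavailableException
    {
       BufferedSound   mixed;
       AddOp           add;

       // Mix the children sample-by-sample, then render the result
       mixed = children.get(0);
       add   = new AddOp();
       for (int i=1; i<children.size(); i++)
       {
          mixed = add.filter(mixed, children.get(i), null);
       }
       mixed.render(clip);
    }
}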
The BoomBox
package auditory.sampled;

import java.util.*;
import javax.sound.sampled.*;

/**
 * Renders/presents sampled auditory content
 *
 * @author  Prof. David Bernstein, James Madison University
 * @version 1.0
 */
public class BoomBox implements LineListener
{
    private Content                content;
    private Clip                   clip;
    private final Object           sync      = new Object();
    private Vector<LineListener>   listeners = new Vector<LineListener>();

    /**
     * Explicit Value Constructor
     *
     * @param content The Content
     */
    public BoomBox(Content content)
    {
       this.content = content;
    }

    /**
     * Add a LineListener to this Content
     *
     * @param listener The LineListener to add
     */
    public void addLineListener(LineListener listener)
    {
       listeners.add(listener);
    }

    /**
     * Render the Content without blocking
     */
    public void render() throws LineUnavailableException
    {
       render(false);
    }

    /**
     * Render this Content
     *
     * @param block true to block the calling thread until the Clip stops
     */
    public void render(boolean block) throws LineUnavailableException
    {
       DataLine.Info info;

       info = new DataLine.Info(Clip.class, content.getAudioFormat());

       // Assign to the clip attribute (not a local variable) so
       // that update() can close it when the Clip stops
       clip = (Clip)AudioSystem.getLine(info);
       clip.addLineListener(this); // So the calling thread can be informed

       content.render(clip);

       synchronized(sync)
       {
          // Wait until the Clip stops [and notifies us by
          // calling the update() method]
          if (block)
          {
             try
             {
                sync.wait();
             }
             catch (InterruptedException ie)
             {
                // Ignore
             }
          }
       }
    }

    /**
     * Remove a LineListener
     *
     * @param listener The LineListener to remove
     */
    public void removeLineListener(LineListener listener)
    {
       listeners.remove(listener);
    }

    /**
     * Handle LineEvents (required by LineListener)
     *
     * @param evt The LineEvent of interest
     */
    public void update(LineEvent evt)
    {
       Enumeration<LineListener>   e;
       LineEvent.Type              type;
       LineListener                listener;

       synchronized(sync)
       {
          // Forward the LineEvent to all LineListener objects
          e = listeners.elements();
          while (e.hasMoreElements())
          {
             listener = e.nextElement();
             listener.update(evt);
          }

          // Get the type of the event
          type = evt.getType();

          // Process STOP events
          if (type.equals(LineEvent.Type.STOP))
          {
             sync.notifyAll();
             clip.close();
             clip.removeLineListener(this);
             clip = null;
          }
       }
    }
}
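A sketch of typical use, inside a method that declares LineUnavailableException (again assuming the sine-wave factory is a static method of BufferedSoundFactory; the parameter values are hypothetical):

    BufferedSound   sound;
    BoomBox         player;

    // A 1-second, 440Hz tone
    sound  = BufferedSoundFactory.createBufferedSound(440.0, 1000000,
                                                      8000.0f, 16000.0);
    player = new BoomBox(sound);

    // Block until the Clip posts its STOP event
    player.render(true);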
Getting Information on Capabilities
import javax.sound.sampled.*;

/**
 * A simple utility that shows the capabilities of the
 * available auditory output devices
 *
 * @author  Prof. David Bernstein, James Madison University
 * @version 1.0
 */
public class ShowCapabilities
{
    /**
     * The entry point
     *
     * @param args The command line arguments
     */
    public static void main(String[] args)
    {
       Line.Info[]    lineinfo;
       Mixer          mixer;
       Mixer.Info[]   mixerinfo;

       mixerinfo = AudioSystem.getMixerInfo();
       for (int i=0; i<mixerinfo.length; i++)
       {
          System.out.println(mixerinfo[i]);
          mixer = AudioSystem.getMixer(mixerinfo[i]);

          lineinfo = mixer.getTargetLineInfo();
          System.out.println("\n   Target Lines");
          for (int j=0; j<lineinfo.length; j++)
          {
             System.out.println("      "+lineinfo[j]+
                                " ("+mixer.getMaxLines(lineinfo[j])+")");
          }

          lineinfo = mixer.getSourceLineInfo();
          System.out.println("\n   Source Lines");
          for (int j=0; j<lineinfo.length; j++)
          {
             System.out.println("      "+lineinfo[j]+
                                " ("+mixer.getMaxLines(lineinfo[j])+")");
          }
          System.out.println("\n\n");
       }
    }
}