Goals
- Capture PCM data with AudioRecord
- Play PCM audio with the AudioTrack API
- Convert the PCM data to a WAV file
- Read the WAV file data and play it back with AudioTrack
1. Capturing PCM data with AudioRecord
The AudioRecord constructor:
public AudioRecord(int audioSource, int sampleRateInHz, int channelConfig, int audioFormat, int bufferSizeInBytes)
- audioSource: the audio source, which here is the microphone
- sampleRateInHz: the sampling rate; 44100 Hz is the common choice
- channelConfig: the channel configuration, mono or stereo
- audioFormat: the quantization depth, i.e. the number of bits per sample
- bufferSizeInBytes: the size of the buffer each read from the hardware goes into; obtain it via getMinBufferSize() with the same recording parameters
Recording code
- Initialize the AudioRecord
private void init() {
    // Audio source: the microphone
    audioSource = MediaRecorder.AudioSource.MIC;
    // Sampling rate (MediaRecorder typically uses 8000 Hz and CD audio 44100 Hz; different Android devices can sample at different rates, and 11025 Hz is another common one)
    frequency = 44100;
    // Number of channels to capture; the constants are defined in AudioFormat (use the CHANNEL_IN_* variants for recording)
    channelConfig = AudioFormat.CHANNEL_IN_MONO;
    // Quantization depth; AudioFormat defines the possible constants, usually ENCODING_PCM_16BIT or ENCODING_PCM_8BIT.
    // PCM stands for pulse-code modulation, i.e. raw audio samples, so each sample is 16 or 8 bits;
    // 16 bits takes more space and processing power but reproduces the audio more faithfully.
    audioFormat = AudioFormat.ENCODING_PCM_16BIT;
    recordBufSize = AudioRecord.getMinBufferSize(frequency, channelConfig, audioFormat);
    audioRecord = new AudioRecord(audioSource, frequency, channelConfig, audioFormat, recordBufSize);
    parent = new File(Environment.getExternalStorageDirectory().getAbsolutePath() + "/AudioRecord");
    if (!parent.exists()) {
        parent.mkdirs();
    }
}
- Start recording
private void startRecord() {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
        // On Android 6.0+ these are runtime permissions; recording starts from the grant callback (see below)
        requestPermissions(new String[]{Manifest.permission.WRITE_EXTERNAL_STORAGE, Manifest.permission.RECORD_AUDIO}, 0);
    } else {
        getAudio();
    }
}
Remember to declare the android.permission.RECORD_AUDIO and android.permission.WRITE_EXTERNAL_STORAGE permissions in the manifest.
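On Android 6.0+ the permission request above is asynchronous, so recording should only start once the user actually grants it. A minimal sketch of the callback, assuming this code lives in an Activity and using request code 0 to match startRecord() above:

@Override
public void onRequestPermissionsResult(int requestCode, String[] permissions, int[] grantResults) {
    super.onRequestPermissionsResult(requestCode, permissions, grantResults);
    // Request code 0 matches the requestPermissions() call in startRecord()
    if (requestCode == 0 && grantResults.length > 0
            && grantResults[0] == PackageManager.PERMISSION_GRANTED) {
        getAudio();
    }
}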
- Stop recording
private void stopRecord() {
    // The recording thread checks this flag and exits its write loop
    isRecording = false;
}
- Read the captured audio and write it to a file
/**
 * Reads the captured audio and writes it to a file.
 */
private void getAudio() {
    isRecording = true;
    new Thread() {
        @Override
        public void run() {
            super.run();
            File file = new File(parent, "audio.pcm");
            if (file.exists()) {
                file.delete();
            }
            try {
                file.createNewFile();
            } catch (IOException e) {
                e.printStackTrace();
            }
            DataOutputStream outputStream = null;
            try {
                outputStream = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(file)));
                byte[] buffer = new byte[recordBufSize];
                // Start recording
                audioRecord.startRecording();
                while (isRecording) {
                    int readResult = audioRecord.read(buffer, 0, recordBufSize);
                    // Write only the bytes actually read
                    if (readResult > 0) {
                        outputStream.write(buffer, 0, readResult);
                    }
                    Log.e("avPcm", "recording....");
                }
                audioRecord.stop();
                audioRecord.release();
                outputStream.close();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }.start();
}
If all goes well, this produces the file /AudioRecord/audio.pcm in the device's external storage.
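A PCM file has no header, so anything playing it must be told its parameters. One way to sanity-check the capture (not from the original post, just a suggestion) is to import the file into an audio editor such as Audacity as raw data: signed 16-bit PCM, little-endian, mono, 44100 Hz.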
2. Playing PCM audio with the AudioTrack API
private void playPcm() {
    DataInputStream dis = null;
    File file = new File(parent, "audio.pcm");
    try {
        dis = new DataInputStream(new BufferedInputStream(new FileInputStream(file)));
        // For playback, use the STREAM_MUSIC stream type and the CHANNEL_OUT_* channel constants
        int bufferSize = AudioTrack.getMinBufferSize(frequency, AudioFormat.CHANNEL_OUT_MONO, audioFormat);
        audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, frequency, AudioFormat.CHANNEL_OUT_MONO, audioFormat, bufferSize, AudioTrack.MODE_STREAM);
        byte[] datas = new byte[bufferSize];
        audioTrack.play();
        while (true) {
            int i = 0;
            try {
                while (dis.available() > 0 && i < datas.length) {
                    datas[i] = dis.readByte();
                    i++;
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
            // Write only the bytes actually read
            audioTrack.write(datas, 0, i);
            // A partially filled buffer means the file has been fully read
            if (i != bufferSize) {
                audioTrack.stop();
                audioTrack.release();
                break;
            }
        }
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    }
}
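As an aside, the stream-type constructor used above was deprecated in API 26; on newer devices the same track can be built with AudioTrack.Builder. A sketch, assuming it replaces the new AudioTrack(...) line above and reuses the same frequency and bufferSize:

audioTrack = new AudioTrack.Builder()
        .setAudioAttributes(new AudioAttributes.Builder()
                .setUsage(AudioAttributes.USAGE_MEDIA)
                .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
                .build())
        .setAudioFormat(new AudioFormat.Builder()
                .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                .setSampleRate(frequency)
                .setChannelMask(AudioFormat.CHANNEL_OUT_MONO)
                .build())
        .setBufferSizeInBytes(bufferSize)
        .setTransferMode(AudioTrack.MODE_STREAM)
        .build();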
3. Converting PCM data to a WAV file
The WAV format, like BMP, is a file format specification developed by Microsoft, and the two share a common structure: the file is split into two parts. The first is the "file header", which records the key parameters; for audio that means the sample rate, channel count, bit depth, and so on, while for an image it means the width, height, and color depth. The second part is the "data block", frame after frame of binary data: for audio, the raw PCM samples; for an image, the RGB data.
First, let's look at the WAV "file header"; the article "WAVE PCM soundfile format" is a good reference.
A brief explanation of the header fields:
- ChunkID: 4 bytes, the 'RIFF' marker
- ChunkSize: 4 bytes, the size of everything after this field (36 + the PCM data length)
- Format: 4 bytes, the 'WAVE' marker
- Subchunk1ID: 4 bytes, the 'fmt ' marker (note the trailing space)
- Subchunk1Size: 4 bytes, the size of the 'fmt ' chunk (16 for PCM)
- AudioFormat: 2 bytes, the encoding; 1 means uncompressed PCM
- NumChannels: 2 bytes, the number of channels
- SampleRate: 4 bytes, the sampling rate: 8000, 44100, etc.
- ByteRate: 4 bytes, the audio data rate: SampleRate * NumChannels * BitsPerSample / 8 (see the worked example after this list)
- BlockAlign: 2 bytes, how many bytes one sample frame spans across all channels: NumChannels * BitsPerSample / 8
- BitsPerSample: 2 bytes, the number of bits per sample
- Subchunk2ID: 4 bytes, the 'data' marker, followed by Subchunk2Size: 4 bytes, the length of the raw PCM data
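For the parameters used in this article (44100 Hz, mono, 16-bit), that works out to ByteRate = 44100 * 1 * 16 / 8 = 88200 bytes per second and BlockAlign = 1 * 16 / 8 = 2 bytes, which are exactly the values the header-writing code below produces.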
Now for the conversion:
private void convertWaveFile() {
    FileInputStream in = null;
    FileOutputStream out = null;
    long totalAudioLen = 0;
    long totalDataLen = totalAudioLen + 36;
    long longSampleRate = frequency;
    // The channel count (we recorded mono above), not the AudioFormat channel constant
    int channels = 1;
    // 16-bit samples: byte rate = sampleRate * channels * 16 / 8
    long byteRate = 16 * frequency * channels / 8;
    byte[] data = new byte[recordBufSize];
    try {
        File filePcm = new File(parent, "audio.pcm");
        File fileWav = new File(parent, "audio.wav");
        in = new FileInputStream(filePcm);
        out = new FileOutputStream(fileWav);
        // Total length of the raw audio data
        totalAudioLen = in.getChannel().size();
        // ChunkSize excludes the 8 bytes of the 'RIFF' marker and the size field itself, hence the remaining 36 header bytes plus the data
        totalDataLen = totalAudioLen + 36;
        // Write the header first
        WriteWaveFileHeader(out, totalAudioLen, totalDataLen, longSampleRate, channels, byteRate);
        // Then copy the PCM data, writing only the bytes actually read
        int len;
        while ((len = in.read(data)) != -1) {
            out.write(data, 0, len);
        }
        in.close();
        out.close();
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    }
}
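Note that convertWaveFile() must only run after the recording thread has finished writing audio.pcm: the header fields depend on the final PCM size obtained from in.getChannel().size(), so converting mid-recording would bake a stale length into the file.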
- Write the WAV file header
private void WriteWaveFileHeader(FileOutputStream out, long totalAudioLen, long totalDataLen, long longSampleRate, int channels, long byteRate) {
    byte[] header = new byte[44];
    header[0] = 'R'; // 'RIFF' marker
    header[1] = 'I';
    header[2] = 'F';
    header[3] = 'F';
    header[4] = (byte) (totalDataLen & 0xff); // ChunkSize, little-endian
    header[5] = (byte) ((totalDataLen >> 8) & 0xff);
    header[6] = (byte) ((totalDataLen >> 16) & 0xff);
    header[7] = (byte) ((totalDataLen >> 24) & 0xff);
    header[8] = 'W'; // 'WAVE' marker
    header[9] = 'A';
    header[10] = 'V';
    header[11] = 'E';
    // fmt chunk
    header[12] = 'f'; // 'fmt ' marker
    header[13] = 'm';
    header[14] = 't';
    header[15] = ' '; // note the trailing space
    // Size of the 'fmt ' chunk
    header[16] = 16; // 4 bytes: size of 'fmt ' chunk
    header[17] = 0;
    header[18] = 0;
    header[19] = 0;
    // Encoding: 1 = PCM
    header[20] = 1; // format = 1
    header[21] = 0;
    // Number of channels
    header[22] = (byte) channels;
    header[23] = 0;
    // Sample rate, the playback rate of each channel
    header[24] = (byte) (longSampleRate & 0xff);
    header[25] = (byte) ((longSampleRate >> 8) & 0xff);
    header[26] = (byte) ((longSampleRate >> 16) & 0xff);
    header[27] = (byte) ((longSampleRate >> 24) & 0xff);
    // Byte rate: sampleRate * numChannels * bitsPerSample / 8
    header[28] = (byte) (byteRate & 0xff);
    header[29] = (byte) ((byteRate >> 8) & 0xff);
    header[30] = (byte) ((byteRate >> 16) & 0xff);
    header[31] = (byte) ((byteRate >> 24) & 0xff);
    // Block align: bytes per sample frame across all channels = numChannels * bitsPerSample / 8
    header[32] = (byte) (channels * 16 / 8);
    header[33] = 0;
    // Bits per sample
    header[34] = 16;
    header[35] = 0;
    // data chunk
    header[36] = 'd'; // 'data' marker
    header[37] = 'a';
    header[38] = 't';
    header[39] = 'a';
    header[40] = (byte) (totalAudioLen & 0xff); // Subchunk2Size: length of the raw PCM data
    header[41] = (byte) ((totalAudioLen >> 8) & 0xff);
    header[42] = (byte) ((totalAudioLen >> 16) & 0xff);
    header[43] = (byte) ((totalAudioLen >> 24) & 0xff);
    try {
        out.write(header, 0, 44);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
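As an aside, the manual bit-shifting above can be replaced with ByteBuffer in little-endian order, mirroring the byteArrayToInt()/byteArrayToShort() helpers used for reading later on. A sketch; intToLittleEndian and shortToLittleEndian are hypothetical helpers, not part of the original code:

private byte[] intToLittleEndian(int value) {
    // 4 bytes, least significant byte first: the layout the WAV header expects
    return ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN).putInt(value).array();
}

private byte[] shortToLittleEndian(short value) {
    // 2 bytes, least significant byte first
    return ByteBuffer.allocate(2).order(ByteOrder.LITTLE_ENDIAN).putShort(value).array();
}

With these, for example, System.arraycopy(intToLittleEndian((int) totalDataLen), 0, header, 4, 4) fills the ChunkSize field.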
A note on a problem I ran into here: after converting PCM to WAV, the file sounded completely wrong in a local media player, yet reading the same WAV back with AudioTrack (below) played correctly. The likely cause is the channel-count bug fixed above: the original code passed the channel configuration constant (whose integer value is not 1) as the NumChannels header field, so a standards-compliant player treated the mono data as multi-channel and played it at the wrong rate, while the AudioTrack code ignores the header and uses the known recording parameters.
4. Reading a WAV file and playing it with AudioTrack
- Play the WAV file
private void playWav() {
    // As in playPcm(), use STREAM_MUSIC and the CHANNEL_OUT_* constant for playback
    int bufferSizeInBytes = AudioTrack.getMinBufferSize(frequency, AudioFormat.CHANNEL_OUT_MONO, audioFormat);
    mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, frequency, AudioFormat.CHANNEL_OUT_MONO, audioFormat, bufferSizeInBytes, AudioTrack.MODE_STREAM);
    File fileWav = new File(parent, "audio.wav");
    try {
        dis = new DataInputStream(new FileInputStream(fileWav));
        // Consume the 44-byte header so only raw PCM remains in the stream
        readWavHeader(dis);
        new Thread(ReadDataRunnable).start();
    } catch (IOException e) {
        e.printStackTrace();
    }
}
- Read the header
private void readWavHeader(DataInputStream dis) {
    try {
        byte[] byteIntValue = new byte[4];
        byte[] byteShortValue = new byte[2];
        // Read the four ASCII characters of the ChunkID
        String mChunkID = "" + (char) dis.readByte() + (char) dis.readByte() + (char) dis.readByte() + (char) dis.readByte();
        Log.e("Wav_Header", "mChunkID:" + mChunkID);
        dis.read(byteIntValue);
        int chunkSize = byteArrayToInt(byteIntValue);
        Log.e("Wav_Header", "chunkSize:" + chunkSize);
        String format = "" + (char) dis.readByte() + (char) dis.readByte() + (char) dis.readByte() + (char) dis.readByte();
        Log.e("Wav_Header", "format:" + format);
        String subchunk1ID = "" + (char) dis.readByte() + (char) dis.readByte() + (char) dis.readByte() + (char) dis.readByte();
        Log.e("Wav_Header", "subchunk1ID:" + subchunk1ID);
        dis.read(byteIntValue);
        int subchunk1Size = byteArrayToInt(byteIntValue);
        Log.e("Wav_Header", "subchunk1Size:" + subchunk1Size);
        dis.read(byteShortValue);
        short audioFormat = byteArrayToShort(byteShortValue);
        Log.e("Wav_Header", "audioFormat:" + audioFormat);
        dis.read(byteShortValue);
        short numChannels = byteArrayToShort(byteShortValue);
        Log.e("Wav_Header", "numChannels:" + numChannels);
        dis.read(byteIntValue);
        int sampleRate = byteArrayToInt(byteIntValue);
        Log.e("Wav_Header", "sampleRate:" + sampleRate);
        dis.read(byteIntValue);
        int byteRate = byteArrayToInt(byteIntValue);
        Log.e("Wav_Header", "byteRate:" + byteRate);
        dis.read(byteShortValue);
        short blockAlign = byteArrayToShort(byteShortValue);
        Log.e("Wav_Header", "blockAlign:" + blockAlign);
        dis.read(byteShortValue);
        short btsPerSample = byteArrayToShort(byteShortValue);
        Log.e("Wav_Header", "btsPerSample:" + btsPerSample);
        String subchunk2ID = "" + (char) dis.readByte() + (char) dis.readByte() + (char) dis.readByte() + (char) dis.readByte();
        Log.e("Wav_Header", "subchunk2ID:" + subchunk2ID);
        dis.read(byteIntValue);
        int subchunk2Size = byteArrayToInt(byteIntValue);
        Log.e("Wav_Header", "subchunk2Size:" + subchunk2Size);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
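One caveat: this parser assumes the canonical 44-byte layout written above. WAV files produced by other tools may carry extra chunks (for example LIST/INFO metadata) between 'fmt ' and 'data', so a general-purpose reader has to walk the chunks by ID and size rather than assume fixed offsets.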
- Bytes to int
private int byteArrayToInt(byte[] byteIntValue) {
    // Multi-byte values in the WAV header are stored little-endian
    return ByteBuffer.wrap(byteIntValue).order(ByteOrder.LITTLE_ENDIAN).getInt();
}
- Bytes to short
private short byteArrayToShort(byte[] byteShortValue) {
    return ByteBuffer.wrap(byteShortValue).order(ByteOrder.LITTLE_ENDIAN).getShort();
}
- A thread that continuously reads data and plays it back
private Runnable ReadDataRunnable = new Runnable() {
    @Override
    public void run() {
        byte[] buffer = new byte[1024 * 2];
        // Start playback once, then keep feeding PCM into the track
        mAudioTrack.play();
        int nbytes;
        while ((nbytes = readData(buffer, 0, buffer.length)) > 0) {
            // Write only the bytes actually read
            mAudioTrack.write(buffer, 0, nbytes);
        }
        mAudioTrack.stop();
        mAudioTrack.release();
        try {
            if (dis != null) {
                dis.close();
                dis = null;
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
};
- Read the data chunk
public int readData(byte[] buffer, int offset, int count) {
    try {
        int nbytes = dis.read(buffer, offset, count);
        // -1 means end of stream; report it as 0 so the play loop terminates
        if (nbytes == -1) {
            return 0;
        }
        return nbytes;
    } catch (IOException e) {
        e.printStackTrace();
    }
    return -1;
}
Here is what gets logged while the header is parsed:
[screenshot: logcat output of the parsed WAV header fields]
Summary
Overall this is fairly straightforward: once you understand the WAV file format, the rest falls into place.