美文网首页iApp技术文章
iApp录音实现声音反馈

iApp录音实现声音反馈

作者: 抹茶NightSky | 来源:发表于2019-04-06 14:19 被阅读0次

    demo简介

    iApp要想实现边录边听功能就不能使用iApp带有的bly来进行录音。
    既然iApp的代码不能满足我们,我们就使用Android SDK来满足自己!
    利用AndroidSDK提供的AudioRecord来进行录音采样就能获取音频的裸流。
    实现了录音,我们还需要播放音频,这时候难道要用iApp提供的bfm吗?
    我们可以使用AndroidSDK提供的AudioTrack来播放录下来的声音。
    话就不多说了下面就开始敲代码吧!喵~

    mian载入事件code

    // --- Recording configuration (globals) ---
    // Temporary raw PCM capture file
    ss audiotmpfile = "/sdcard/RecordAudioFile/audio_tmp.pcm"
    // Final WAV output file (raw PCM + 44-byte header)
    ss audiosavefile = "/sdcard/RecordAudioFile/audio.wav"
    // Audio source: 0=default, 1=microphone, 5=camcorder mic, 8=remote submix
    // NOTE(review): declared but the fn library hard-codes source 1 - confirm intent
    ss audiosource = 1
    // Sample rate in Hz (valid range roughly 4000-48000)
    ss sampleRateInHz = 44100
    // Channel config: 12=stereo(in), 2=mono(legacy), 4=left, 8=right
    // NOTE(review): declared but the fn library hard-codes 12 - confirm intent
    ss channel = 12
    // PCM encoding: 2=16-bit, 3=8-bit
    ss audioformat = 2
    // Compute the minimum capture buffer size for these settings.
    // NOTE(review): channelConfig here is the literal 2 (legacy mono) while the
    // recorder is created with 12 (stereo) - the buffer may be sized for the
    // wrong channel count; verify against AudioRecord.getMinBufferSize docs.
    java(ss.samplebuffersize,null,"android.media.AudioRecord.getMinBufferSize","int",ss.sampleRateInHz,"int",2,"int",ss.audioformat)
    // ------ Click listener shared by all five buttons ------
    cls("android.view.View$OnClickListener",c)
    // type: 0 = normal recording, 1 = live-monitor ("feedback") recording
    ss type = 0
    javacb(hd,c)
    {
      sgsz(st_aS,0,view)
      // Which button was clicked: index into ss.ids (creation order below)
      java(index,ss.ids,"java.util.ArrayList.indexOf","Object",view)
      f(index==0)
      {
        // Effect toggle button: switch between normal and monitored recording
        ug(view,"text",text)
        f(text=="效果(普通)")
        {
          us(view,"text","效果(反听)")
          ss type = 1
        }
        else
        {
          us(view,"text","效果(普通)")
          ss type = 0
        }
      }
      else f(index==1)
      {
        // Start-recording button
        // NOTE(review): ss.sampleRateInHz is passed where the fn library's
        // parameter is named samplebuffersize (used as the AudioRecord buffer
        // size); 44100 happens to exceed the minimum so it works, but
        // ss.samplebuffersize was presumably intended - confirm.
        fn lly.createAudioRecord(ss.sampleRateInHz)
        f(ss.type==0)
        {
          fn lly.startRecord(ss.audioRecord,ss.samplebuffersize,ss.audiotmpfile)
        }
        else f(ss.type==1)
        {
          // Feedback mode also needs an AudioTrack for live playback
          fn lly.createAudioTrack(ss.sampleRateInHz)
          fn lly.startRecord(ss.audioRecord,ss.track,ss.samplebuffersize,ss.audiotmpfile)
        }
      }
      else f(index==2)
      {
        // Pause/resume button
        ug(view,"text",text)
        f(text=="暂停录音")
        {
          fn lly.pauseRecord();
          us(view,"text","继续录音")
        }
        else
        {
          // Resume: programmatically re-click the start-recording button;
          // the capture file is opened in append mode, so audio accumulates
          us(view,"text","暂停录音")
          gslist(ss.ids,1,view)
          java(null,view,"android.view.View.performClick")
        }
      }
      else f(index==3)
      {
        // Stop button: release recorder/track and wrap the PCM into a WAV
        fn lly.stopRecord(ss.audioRecord,ss.track,ss.audiotmpfile,ss.audiosavefile)
        tw("已保存录音文件")
      }
      else f(index==4)
      {
        // Play button: open the floating player for the saved WAV
        fn lly.AudioPlay(ss.audiosavefile)
      }
      // Surface any error reported by the fn library, then clear it
      f(ss.error!=null)
      {
        syso(ss.error)
        tw(ss.error)
        ss error = null
      }
    }
    // ------ End of click listener ------
    // ------ Create the five buttons and attach the shared listener ------
    sl("效果(普通)|开始录音|暂停录音|停止录音|播放录音","|",bntexts)
    for(str;bntexts)
    {
      nvw(3,1,"按钮","width=-2\nheight=-2\ntext="+str,bn)
      java(null,bn,"android.view.View.setOnClickListener","android.view.View$OnClickListener",hd)
      aslist(ss.ids,bn)
    }
    // ------ End of button setup ------
    

    fn函数库

    先新建一个lly.myu,把下面的copy进去。

    剩下的操作直接调用就行了!
    反听效果有一个问题:AudioRecord读取数据需要150ms左右,然后用AudioTrack写入也需要150ms左右,造成了约300ms才有声音的延迟。

    // Create the global recorder ss.audioRecord.
    // AudioRecord(audioSource=1 MIC, sampleRate=ss.sampleRateInHz,
    //             channelConfig=12 stereo-in, audioFormat=2 PCM 16-bit,
    //             bufferSizeInBytes=samplebuffersize).
    // NOTE(review): the ss.audiosource and ss.channel globals are ignored;
    // 1 and 12 are hard-coded here - confirm that is intended.
    fn createAudioRecord(samplebuffersize)
    javanew(ss.audioRecord,"android.media.AudioRecord","int",1,"int",ss.sampleRateInHz,"int",12,"int",2,"int",samplebuffersize)
    end fn
    
    // Create the global player ss.track used for live monitoring.
    // AudioTrack(streamType=3 STREAM_MUSIC, sampleRate=ss.sampleRateInHz,
    //            channelConfig=12 stereo, audioFormat=2 PCM 16-bit,
    //            bufferSizeInBytes=samplebuffersize, mode=1 MODE_STREAM).
    fn createAudioTrack(samplebuffersize)
    javanew(ss.track,"android.media.AudioTrack","int",3,"int",ss.sampleRateInHz,"int",12,"int",2,"int",samplebuffersize,"int",1)
    // play() is deliberately deferred to ReturnBackAudioWriteStream:
    //java(null,ss.track,"android.media.AudioTrack.play")
    end fn
    
    // Start a plain (no-monitor) recording on a background thread.
    // Refuses to start (sets ss.error and aborts) if one is already running.
    fn startRecord(audiorecord,samplebuffersize,filepath)
    f(ss.isRecord)
    {
      ss error = "method:startRecord message:Recording"
      endcode
    }
    // t(){...} runs the capture loop off the UI thread
    t()
    {
      fn lly.AudioRecordWriteStream(audiorecord,samplebuffersize,filepath)
    }
    // NOTE(review): the flag is set AFTER the thread starts; the loop's first
    // check happens after a ~150ms read, so this works in practice - confirm.
    ss isRecord = true
    end fn
    
    // Start a recording with live monitoring ("feedback") on a background
    // thread: captured audio is played back through audiotrack as it is saved.
    // Refuses to start (sets ss.error and aborts) if one is already running.
    fn startRecord(audiorecord,audiotrack,samplebuffersize,filepath)
    f(ss.isRecord)
    {
      ss error = "method:startRecord message:Recording"
      endcode
    }
    // t(){...} runs the capture-and-playback loop off the UI thread
    t()
    {
      fn lly.ReturnBackAudioWriteStream(audiorecord,audiotrack,samplebuffersize,filepath)
    }
    ss isRecord = true
    end fn
    
    // Pause recording by clearing the shared flag; the capture loop exits on
    // its next iteration. (Callers may equivalently set ss isRecord = false.)
    fn pauseRecord()
    ss isRecord = false
    end fn
    
    // Stop recording, release the audio objects, and produce the final WAV.
    fn stopRecord(audiorecord,audiotrack,filepath,newfilepath)
    ss isRecord = false
    // NOTE(review): the capture thread may still be inside read() when
    // release() runs here - confirm there is no race on shutdown.
    java(null,audiorecord,"android.media.AudioRecord.release")
    // NOTE(review): this tests the GLOBAL ss.audiotrack, but the value passed
    // in is the local parameter audiotrack (the global is ss.track) - this
    // condition is likely always false; `audiotrack!=null` was probably meant.
    f(ss.audiotrack!=null)
    {
      java(null,audiotrack,"android.media.AudioTrack.release")
    }
    // Prepend the 44-byte WAV header onto the raw PCM capture
    fn lly.pcmDataAddHeader(filepath,newfilepath)
    end fn
    
    // Capture loop (worker thread): read PCM from the AudioRecord and append
    // it to filepath until ss.isRecord goes false or the recorder errors.
    fn AudioRecordWriteStream(audiorecord,samplebuffersize,filepath)
    cls("byte",c)
    // ------ Create the parent directory and the output file ------
    javanew(file,"java.io.File","String",filepath)
    java(filep,file,"java.io.File.getParentFile")
    java(iserror,filep,"java.io.File.mkdirs")
    java(iserror,file,"java.io.File.createNewFile")
    // ------ End directory/file creation ------
    // Open in append mode so pause/resume keeps previously captured audio
    javanew(os,"java.io.FileOutputStream","java.io.File",file,"boolean",true)
    // Allocate a byte[samplebuffersize] via reflection
    java(bytes,null,"java.lang.reflect.Array.newInstance","java.lang.Class",c,"int",samplebuffersize)
    // Begin capturing
    java(null,audiorecord,"android.media.AudioRecord.startRecording")
    // Priming read to obtain an initial status; this first buffer is discarded
    java(state,audiorecord,"android.media.AudioRecord.read","byte[]",bytes,"int",0,"int",samplebuffersize)
    // -3 = AudioRecord.ERROR_INVALID_OPERATION (recorder not ready/released)
    w(state!=-3&&ss.isRecord)
    {
    java(state,audiorecord,"android.media.AudioRecord.read","byte[]",bytes,"int",0,"int",samplebuffersize)
    java(null,os,"java.io.OutputStream.write","byte[]",bytes,"int",0,"int",samplebuffersize)
    }
    java(null,os,"java.io.OutputStream.close")
    java(null,audiorecord,"android.media.AudioRecord.stop")
    end fn
    
    // Capture-and-monitor loop (worker thread): read PCM from the AudioRecord,
    // play it straight back through the AudioTrack (the "feedback" effect,
    // ~300ms latency per the article), and append it to filepath, until
    // ss.isRecord goes false or the recorder errors.
    fn ReturnBackAudioWriteStream(audiorecord,audiotrack,samplebuffersize,filepath)
    cls("byte",c)
    // ------ Create the parent directory and the output file ------
    javanew(file,"java.io.File","String",filepath)
    java(filep,file,"java.io.File.getParentFile")
    java(iserror,filep,"java.io.File.mkdirs")
    java(iserror,file,"java.io.File.createNewFile")
    // ------ End directory/file creation ------
    // Open in append mode so pause/resume keeps previously captured audio
    javanew(os,"java.io.FileOutputStream","java.io.File",file,"boolean",true)
    // Start the playback side for live monitoring
    java(null,audiotrack,"android.media.AudioTrack.play")
    java(bytes,null,"java.lang.reflect.Array.newInstance","java.lang.Class",c,"int",samplebuffersize)
    // Begin capturing
    java(null,audiorecord,"android.media.AudioRecord.startRecording")
    // Priming read to obtain an initial status; this first buffer is discarded
    java(state,audiorecord,"android.media.AudioRecord.read","byte[]",bytes,"int",0,"int",samplebuffersize)
    // -3 = AudioRecord.ERROR_INVALID_OPERATION (recorder not ready/released)
    w(state!=-3&&ss.isRecord)
    {
    java(state,audiorecord,"android.media.AudioRecord.read","byte[]",bytes,"int",0,"int",samplebuffersize)
    java(null,audiotrack,"android.media.AudioTrack.write","byte[]",bytes,"int",0,"int",samplebuffersize)
    java(null,os,"java.io.OutputStream.write","byte[]",bytes,"int",0,"int",samplebuffersize)
    }
    // Close the stream first: if the user presses stop while AudioRecord is
    // still sampling, the released recorder ends this function early, so the
    // close must happen before the stop()/pause() calls below.
    java(null,os,"java.io.OutputStream.close")
    java(null,audiorecord,"android.media.AudioRecord.stop")
    java(null,audiotrack,"android.media.AudioTrack.pause")
    end fn
    
    // Turn the raw PCM capture into a playable WAV: write a fixed 44-byte
    // RIFF/WAVE header, then copy the PCM bytes after it.
    // NOTE(review): the RIFF and data chunk sizes inside the header are
    // HARD-CODED (iApp lacks the operators to compute them), so recordings of
    // a different length may be rejected by strict players.
    fn pcmDataAddHeader(filepath,newfilepath)
    // ------ Create the target directory and file ------
    javanew(file,"java.io.File","String",filepath)
    javanew(newfile,"java.io.File","String",newfilepath)
    java(filep,newfile,"java.io.File.getParentFile")
    java(iserror,filep,"java.io.File.mkdirs")
    java(iserror,newfile,"java.io.File.createNewFile")
    // ------ End directory/file creation ------
    javanew(is,"java.io.FileInputStream","java.io.File",file)
    javanew(os,"java.io.FileOutputStream","java.io.File",newfile)
    // The 44 header bytes as decimal values:
    // "RIFF" <fixed size> "WAVE" "fmt " 16 / PCM / 2ch / 44100Hz ... "data" <fixed size>
    s a = "82 73 70 70 36 115 248 0 87 65 86 69 102 109 116 32 16 0 0 0 1 0 2 0 68 172 0 0 16 177 2 0 4 0 16 0 100 97 116 97 0 115 248 0"
    sl(a," ",b)
    // Write the 44 header bytes one at a time (c+0 coerces the token to a number)
    for(c;b)
    {
    s(c+0,c)
    java(null,os,"java.io.OutputStream.write","int",c)
    }
    // Read the whole PCM file into one byte[] and append it after the header
    java(len,is,"java.io.InputStream.available")
    cls("byte",c)
    java(bytes,null,"java.lang.reflect.Array.newInstance","java.lang.Class",c,"int",len)
    java(len,is,"java.io.InputStream.read","byte[]",bytes,"int",0,"int",len)
    java(null,os,"java.io.OutputStream.write","byte[]",bytes,"int",0,"int",len)
    // NOTE(review): the input stream `is` is never closed - confirm whether
    // iApp closes it automatically; otherwise this leaks a file descriptor.
    java(null,os,"java.io.OutputStream.close")
    end fn
    
    // Play the saved WAV file in a floating player window (alertwindow.iyu)
    // with a seek bar plus current/total time labels, refreshed once a second.
    fn AudioPlay(filepath)
    // Load the file into the global player ss.mp, start it, enable looping
    bfm(filepath,ss.mp)
    bfms(ss.mp,"st")
    bfms(ss.mp,"sl",true)
    // "ip": is it actually playing? Report via ss.error and abort if not
    bfms(ss.mp, "ip", c)
    f(!c)
    {
      ss error = "method:AudioPlay message:PlayError"
      endcode
    }
    // Open the floating window; its exit callback stops and releases the player
    utw(null,"AudioPlayer","alertwindow.iyu","exit",true,v)
    {
      bfms(ss.mp, "sp")
      bfms(ss.mp, "re")
      ss mp = null
    }
    // Child views: 2 = progress bar, 3 = current-time label, 4 = total-time label
    gvs(v,2,ss.vp)
    gvs(v,3,ss.vt)
    gvs(v,4,ss.vtt)
    // "dn": total duration; use it for the seek bar maximum and the total label
    bfms(ss.mp, "dn", dn)
    us(ss.vp,"max",dn)
    fn lly.timetoformat(dn)
    us(ss.vtt,"text",ss.timeformat)
    // Touching the seek bar seeks the player to the selected position
    ssj(ss.vp,"touchmonitor")
    {
      ug(st_vW,"progress",ps)
      bfms(ss.mp, "seekto", ps)
    }
    // Background thread: once a second push the current position ("cn") into
    // the UI until the window is closed (ss.mp becomes null)
    t()
    {
      w(ss.mp!=null)
      {
        ufnsui()
        {
          bfms(ss.mp, "cn", cdn)
          us(ss.vp,"progress",cdn)
          fn lly.timetoformat(cdn)
          us(ss.vt,"text",ss.timeformat)
        }
      stop(1000)
      }
    }
    end fn
    
    // Format a millisecond count as "MM:SS" into the global ss.timeformat.
    fn timetoformat(num)
    // Whole minutes, then the remaining seconds
    s(num/1000/60,m)
    s(num/1000%60,s)
    // Zero-pad each part to two digits
    f(m<10)
    {
      ss("0"+m,m)
    }
    f(s<10)
    {
      ss("0"+s,s)
    }
    ss(m+":"+s,ss.timeformat)
    end fn
    
    效果图
    源码下载(本接口不经常开启)

    入群下载(如果无法跳转请复制群号)
    Q群:947756726

    相关文章

      网友评论

        本文标题:iApp录音实现声音反馈

        本文链接:https://www.haomeiwen.com/subject/jxyriqtx.html