1: Overall Sequence
While working with MediaRecorder I ran into a stall when stopping video recording, so I traced the MediaRecorder stop-recording flow (based on Android 6.0; the HAL layer is not covered).
PS: The reason the media process can record at all is that, while running, it holds a CameraSource and an AudioSource: the CameraSource holds a Camera client and the AudioSource holds an AudioRecord client. Once the application calls the relevant APIs, these two drive the corresponding HAL layers to capture video and audio respectively.
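To make that ownership concrete before diving into the sources, here is a minimal, self-contained C++ sketch; every name in it is a hypothetical stand-in, not real framework code.
#include <memory>
#include <cstdio>

// Hypothetical stand-ins for CameraSource / AudioSource: each wraps a "client"
// of the corresponding service and knows how to stop it.
struct FakeVideoSource {
    void stop() { std::puts("video source: stop camera client"); }
};
struct FakeAudioSource {
    void stop() { std::puts("audio source: stop AudioRecord client"); }
};

// Hypothetical recorder: stop() is nothing more than forwarding the stop to
// each source it holds -- the same pattern the real stack follows, layer by layer.
struct FakeRecorder {
    std::unique_ptr<FakeVideoSource> video{new FakeVideoSource()};
    std::unique_ptr<FakeAudioSource> audio{new FakeAudioSource()};
    void stop() {
        video->stop();
        audio->stop();
    }
};

int main() {
    FakeRecorder recorder;
    recorder.stop();   // prints the two "stop" lines in order
    return 0;
}
The real stack has several more layers between the top-level stop() and the two sources, but the shape is the same.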
The flow chart itself is simple: it is just one stop() forwarding to the next. In practice, though, you have to work out which stop() is actually being called at each step, so the flow chart below also notes the file each one lives in.
mediarecorder停止录像流程图.png
2: Detailed Steps
The detailed stop flow inside the media process:
frameworks\base\media\java\android\media\MediaRecorder.java
Mediarecorder_stop.png
At the Java layer, MediaRecorder.stop() is a native method, so the call goes straight into JNI:
frameworks\base\media\jni\android_media_MediaRecorder.cpp
amdroid_media_mediarecorder_stop.png
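The screenshot above is not reproduced here, so below is a simplified sketch of what the JNI stub does (reconstructed from the 6.0 sources rather than copied verbatim; the real stub reports errors through a helper, and the getMediaRecorder helper is assumed to exist as in the real file).
// Simplified sketch of the stop stub in android_media_MediaRecorder.cpp (6.0).
// Not verbatim: error handling is inlined here for clarity.
#include <jni.h>
#include <media/mediarecorder.h>   // android::MediaRecorder, android::sp, status_t

using namespace android;

// Assumed helper from the same file: returns the native MediaRecorder that the
// Java object keeps in its native-context field.
extern sp<MediaRecorder> getMediaRecorder(JNIEnv* env, jobject thiz);

static void android_media_MediaRecorder_stop(JNIEnv *env, jobject thiz)
{
    sp<MediaRecorder> mr = getMediaRecorder(env, thiz);
    status_t err = mr->stop();       // forwards to the native proxy below
    if (err != OK) {
        env->ThrowNew(env->FindClass("java/lang/RuntimeException"), "stop failed.");
    }
}
The mr->stop() call lands in the native proxy class MediaRecorder (frameworks\av\media\libmedia\mediarecorder.cpp):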
status_t MediaRecorder::stop()
{
    ALOGV("stop");
    if (mMediaRecorder == NULL) {
        ALOGE("media recorder is not initialized yet");
        return INVALID_OPERATION;
    }
    if (!(mCurrentState & MEDIA_RECORDER_RECORDING)
            && !(mCurrentState & MEDIA_RECORDER_PAUSED)) {
        ALOGE("stop called in an invalid state: %d", mCurrentState);
        return INVALID_OPERATION;
    }

    status_t ret = mMediaRecorder->stop();
    if (OK != ret) {
        ALOGE("stop failed: %d", ret);
        mCurrentState = MEDIA_RECORDER_ERROR;
        return ret;
    }
    /*<DTS2015082607763 huangwen 00181596 20150826 begin */
    POWERPUSH(178, "", "", "");
    /* DTS2015082607763 huangwen 00181596 20150826 end>*/

    // FIXME:
    // stop and reset are semantically different.
    // We treat them the same for now, and will change this in the future.
    doCleanUp();
    mCurrentState = MEDIA_RECORDER_IDLE;
    return ret;
}
At this point the key question is: the stop goes through mMediaRecorder, so which object is mMediaRecorder? Tracing further, it is handed out by
frameworks\av\media\libmediaplayerservice\MediaPlayerService.cpp
and in Android 6.0, for local video recording, the MediaRecorderClient it creates is backed by a StagefrightRecorder, so next we look at how StagefrightRecorder stops.
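For reference, the Binder call arrives in MediaRecorderClient (frameworks\av\media\libmediaplayerservice\MediaRecorderClient.cpp), whose stop() is only a thin forwarder. A sketch of its shape, reconstructed from the 6.0 tree rather than quoted verbatim:
// MediaRecorderClient::stop() -- sketch, not verbatim. It only checks that the
// underlying recorder exists and forwards the call; mRecorder is the
// StagefrightRecorder created when the client was set up.
status_t MediaRecorderClient::stop()
{
    ALOGV("stop");
    Mutex::Autolock lock(mLock);
    if (mRecorder == NULL) {
        ALOGE("recorder is not initialized");
        return NO_INIT;
    }
    return mRecorder->stop();
}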
It turns out StagefrightRecorder stops through mWriter. So what is mWriter? It is chosen from the output format the upper layer passed in when configuring MediaRecorder, i.e. it is constructed in the setup code below:
frameworks\av\media\libmediaplayerservice\StagefrightRecorder.cpp
StagefrightRecorder_setup.png
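How mWriter gets picked can be summarized as follows; this is a simplified sketch of the format switch in StagefrightRecorder (the real logic is spread across prepareInternal()/start() and setupMPEG4orWEBMRecording()), not the verbatim 6.0 code:
// Sketch: the writer is chosen from the output format the app configured.
// For the normal camcorder case (MP4/3GP) it is an MPEG4Writer, which is why
// the rest of this walkthrough follows MPEG4Writer::stop().
switch (mOutputFormat) {
    case OUTPUT_FORMAT_THREE_GPP:
    case OUTPUT_FORMAT_MPEG_4:
        mWriter = new MPEG4Writer(mOutputFd);
        break;
    default:
        // other formats (AMR, AAC ADTS, WEBM, ...) use other MediaWriter
        // implementations and are not covered here
        break;
}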
So the question now becomes how MPEG4Writer stops.
frameworks\av\include\media\stagefright\MPEG4Writer.h
mpeg.png
In MPEG4Writer, stop() is implemented by the reset() function,
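As I recall the 6.0 header, the forward is a one-liner; treat the following as a paraphrase of MPEG4Writer.h rather than an exact quote:
// MPEG4Writer.h: stopping the writer and resetting it are the same operation.
virtual status_t stop() { return reset(); }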
MPEG4Writer_reset.png
and inside reset() the real work is done by the stop() of MPEG4Writer's inner Track class:
track_stop.png
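Since the screenshot is missing, here is a sketch of the shape of MPEG4Writer::Track::stop(), reconstructed rather than quoted: it marks the track as done, stops its media source, then joins the per-track writer thread.
status_t MPEG4Writer::Track::stop() {
    if (!mStarted) {
        ALOGE("Stop() called but track is not started");
        return ERROR_END_OF_STREAM;
    }
    if (mDone) {
        return OK;
    }
    mDone = true;

    // The call the walkthrough follows next: mSource is the encoder source
    // that was added through addSource() during prepare().
    mSource->stop();

    // Wait for this track's writer thread (threadEntry) to drain and exit.
    void *dummy;
    pthread_join(mThread, &dummy);
    status_t err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
    return err;
}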
At this point the question shifts once more: the recording is stopped through mSource, so what object is mSource?
Looking at the code again, mSource is created when the upper layer calls prepare(), which sets up the AudioSource and the VideoSource respectively:
prepare.png
Adding the video source:
addvideosource.png
Adding the audio source:
status_t StagefrightRecorder::setupAudioEncoder(const sp<MediaWriter>& writer) {
    status_t status = BAD_VALUE;
    if (OK != (status = checkAudioEncoderCapabilities())) {
        return status;
    }

    switch(mAudioEncoder) {
        case AUDIO_ENCODER_AMR_NB:
        case AUDIO_ENCODER_AMR_WB:
        case AUDIO_ENCODER_AAC:
        case AUDIO_ENCODER_HE_AAC:
        case AUDIO_ENCODER_AAC_ELD:
            break;

        default:
            ALOGE("Unsupported audio encoder: %d", mAudioEncoder);
            return UNKNOWN_ERROR;
    }

    sp<MediaSource> audioEncoder = createAudioSource();
    if (audioEncoder == NULL) {
        return UNKNOWN_ERROR;
    }

    writer->addSource(audioEncoder);
    return OK;
}
sp<MediaSource> StagefrightRecorder::createAudioSource() {
    int32_t sourceSampleRate = mSampleRate;

    if (mCaptureFpsEnable && mCaptureFps >= mFrameRate) {
        // Upscale the sample rate for slow motion recording.
        // Fail audio source creation if source sample rate is too high, as it could
        // cause out-of-memory due to large input buffer size. And audio recording
        // probably doesn't make sense in the scenario, since the slow-down factor
        // is probably huge (eg. mSampleRate=48K, mCaptureFps=240, mFrameRate=1).
        const static int32_t SAMPLE_RATE_HZ_MAX = 192000;
        sourceSampleRate =
                (mSampleRate * mCaptureFps + mFrameRate / 2) / mFrameRate;
        if (sourceSampleRate < mSampleRate || sourceSampleRate > SAMPLE_RATE_HZ_MAX) {
            ALOGE("source sample rate out of range! "
                    "(mSampleRate %d, mCaptureFps %.2f, mFrameRate %d",
                    mSampleRate, mCaptureFps, mFrameRate);
            return NULL;
        }
    }

    sp<AudioSource> audioSource =
        new AudioSource(
                mAudioSource,
                mOpPackageName,
                sourceSampleRate,
                mAudioChannels,
                mSampleRate);

    status_t err = audioSource->initCheck();

    if (err != OK) {
        ALOGE("audio source is not initialized");
        return NULL;
    }

    sp<AMessage> format = new AMessage;
    switch (mAudioEncoder) {
        case AUDIO_ENCODER_AMR_NB:
        case AUDIO_ENCODER_DEFAULT:
            format->setString("mime", MEDIA_MIMETYPE_AUDIO_AMR_NB);
            break;
        case AUDIO_ENCODER_AMR_WB:
            format->setString("mime", MEDIA_MIMETYPE_AUDIO_AMR_WB);
            break;
        case AUDIO_ENCODER_AAC:
            format->setString("mime", MEDIA_MIMETYPE_AUDIO_AAC);
            format->setInt32("aac-profile", OMX_AUDIO_AACObjectLC);
            break;
        case AUDIO_ENCODER_HE_AAC:
            format->setString("mime", MEDIA_MIMETYPE_AUDIO_AAC);
            format->setInt32("aac-profile", OMX_AUDIO_AACObjectHE);
            break;
        case AUDIO_ENCODER_AAC_ELD:
            format->setString("mime", MEDIA_MIMETYPE_AUDIO_AAC);
            format->setInt32("aac-profile", OMX_AUDIO_AACObjectELD);
            break;

        default:
            ALOGE("Unknown audio encoder: %d", mAudioEncoder);
            return NULL;
    }

    int32_t maxInputSize;
    CHECK(audioSource->getFormat()->findInt32(
            kKeyMaxInputSize, &maxInputSize));

    format->setInt32("max-input-size", maxInputSize);
    format->setInt32("channel-count", mAudioChannels);
    format->setInt32("sample-rate", mSampleRate);
    format->setInt32("bitrate", mAudioBitRate);
    if (mAudioTimeScale > 0) {
        format->setInt32("time-scale", mAudioTimeScale);
    }
    format->setInt32("priority", 0 /* realtime */);

    sp<MediaSource> audioEncoder =
            MediaCodecSource::Create(mLooper, format, audioSource);
    mAudioSourceNode = audioSource;

    if (audioEncoder == NULL) {
        ALOGE("Failed to create audio encoder");
    }

    return audioEncoder;
}
So the stop here ends up calling AudioSource::stop() (strictly speaking, the Track's mSource is the MediaCodecSource encoder created above, and its stop in turn stops the AudioSource it pulls from), i.e.
audiosource_stop.png audiorecorder_reset.png
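Both screenshots are missing here, so below is a sketch of the relevant part of AudioSource (frameworks\av\media\libstagefright\AudioSource.cpp in 6.0), reconstructed rather than quoted: stop() forwards to reset(), and reset() is where the AudioRecord client is actually stopped.
status_t AudioSource::stop() { return reset(); }

status_t AudioSource::reset() {
    Mutex::Autolock autoLock(mLock);
    if (!mStarted) {
        return UNKNOWN_ERROR;
    }
    if (mInitCheck != OK) {
        return NO_INIT;
    }
    mStarted = false;
    mFrameAvailableCondition.signal();

    mRecord->stop();   // mRecord is the AudioRecord this source holds
    waitOutstandingEncodingFrames_l();
    releaseQueuedFrames_l();
    return OK;
}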
The problem now becomes how AudioRecord stops:
frameworks\av\media\libmedia\AudioRecord.cpp
audiorecorder_reset.png
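A sketch of AudioRecord::stop() (reconstructed from the 6.0 file above, not verbatim): the call that matters for this trace is mAudioRecord->stop(), where mAudioRecord is the IAudioRecord binder interface obtained from AudioFlinger::openRecord().
void AudioRecord::stop()
{
    AutoMutex lock(mLock);
    if (!mActive) {
        return;
    }
    mActive = false;
    mProxy->interrupt();    // wake anyone blocked in obtainBuffer()
    mAudioRecord->stop();   // crosses Binder into AudioFlinger (a RecordHandle)

    // the callback thread, if any, is paused while recording is stopped
    sp<AudioRecordThread> t = mAudioRecordThread;
    if (t != 0) {
        t->pause();
    }
}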
From here, the next question is what this mAudioRecord is:
openRecord.png
This brings us to one of the most important places in the audio world, AudioFlinger. Looking at openRecord() here, the object it returns is a RecordHandle.
frameworks\av\services\audioflinger\AudioFlinger.cpp
newRecordHandle.png
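A sketch of the two relevant pieces (reconstructed, not verbatim): openRecord() wraps the RecordThread::RecordTrack it created in a RecordHandle, which is the IAudioRecord object handed back to AudioRecord, and the handle's stop() is a one-line forward to that track.
// Inside AudioFlinger::openRecord(): the track lives on a RecordThread and is
// exposed to the client through a RecordHandle, roughly:
//     recordHandle = new RecordHandle(recordTrack);

// In Tracks.cpp: RecordHandle::stop() forwards to the RecordTrack, which is the
// function quoted below.
void AudioFlinger::RecordHandle::stop() {
    stop_nonvirtual();
}

void AudioFlinger::RecordHandle::stop_nonvirtual() {
    ALOGV("RecordHandle::stop()");
    mRecordTrack->stop();
}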
frameworks\av\services\audioflinger\Tracks.cpp
void AudioFlinger::RecordThread::RecordTrack::stop()
{
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        RecordThread *recordThread = (RecordThread *)thread.get();
        if (recordThread->stop(this) && isExternalTrack()) {
            AudioSystem::stopInput(mThreadIoHandle, (audio_session_t)mSessionId);
            /*DTS2015060102435 dumingliang Huawei Sound Trigger, 20150605 begin*/
            //DTS2014091501224 zhangwenyang 20141129 begin
            recordThread->mAudioFlinger->enableHwSoundTrigger(true);
            //DTS2014091501224 zhangwenyang 20141129 end
            /*DTS2015060102435 dumingliang Huawei Sound Trigger, 20150605 end*/
            /*DTS2015061603512 dumingliang for huawei algo processing,20150618, begin*/
            recordThread->mAudioFlinger->huaweiAudioAlgosetParameter(String8("RECORD_SCENE=off"));
            /*DTS2015061603512 dumingliang for huawei algo processing,20150618, end*/
            recordThread->mAudioFlinger->setCallingAppName(-1); //must be very end
        }
    }
}
At this point we finally know who actually does the stopping. Local recording starts a RecordThread in AudioFlinger, which is dedicated to audio capture for the recording; stopping the track on this thread shuts down the audio side of the recording.
Next, look at how the thread handles the stop:
frameworks\av\services\audioflinger\Threads.cpp
bool AudioFlinger::RecordThread::stop(RecordThread::RecordTrack* recordTrack) {
    ALOGV("RecordThread::stop");
    AutoMutex _l(mLock);
    if (mActiveTracks.indexOf(recordTrack) != 0 || recordTrack->mState == TrackBase::PAUSING) {
        return false;
    }
    // note that threadLoop may still be processing the track at this point [without lock]
    recordTrack->mState = TrackBase::PAUSING;
    // signal thread to stop
    mWaitWorkCV.broadcast();
    // do not wait for mStartStopCond if exiting
    if (exitPending()) {
        return true;
    }
    // mWaitWorkCV.broadcast();
    // FIXME incorrect usage of wait: no explicit predicate or loop
    mStartStopCond.wait(mLock);
    // if we have been restarted, recordTrack is in mActiveTracks here
    if (exitPending() || mActiveTracks.indexOf(recordTrack) != 0) {
        ALOGV("Record stopped OK");
        return true;
    }
    return false;
}
From this we can see that the thread keeps running once started; when the upper layer requests a stop, it comes here to mark the track as PAUSING and wait for the loop to acknowledge. Finally, let's look at what the thread itself does:
bool AudioFlinger::RecordThread::threadLoop()
{
nsecs_t lastWarning = 0;
inputStandBy();
reacquire_wakelock:
sp<RecordTrack> activeTrack;
int activeTracksGen;
{
Mutex::Autolock _l(mLock);
size_t size = mActiveTracks.size();
activeTracksGen = mActiveTracksGen;
if (size > 0) {
// FIXME an arbitrary choice
activeTrack = mActiveTracks[0];
acquireWakeLock_l(activeTrack->uid());
if (size > 1) {
SortedVector<int> tmp;
for (size_t i = 0; i < size; i++) {
tmp.add(mActiveTracks[i]->uid());
}
updateWakeLockUids_l(tmp);
}
} else {
acquireWakeLock_l(-1);
}
}
// used to request a deferred sleep, to be executed later while mutex is unlocked
uint32_t sleepUs = 0;
// loop while there is work to do
for (;;) {
Vector< sp<EffectChain> > effectChains;
// sleep with mutex unlocked
if (sleepUs > 0) {
ATRACE_BEGIN("sleep");
usleep(sleepUs); /* [false alarm] */
ATRACE_END();
sleepUs = 0;
}
// activeTracks accumulates a copy of a subset of mActiveTracks
Vector< sp<RecordTrack> > activeTracks;
// reference to the (first and only) active fast track
sp<RecordTrack> fastTrack;
// reference to a fast track which is about to be removed
sp<RecordTrack> fastTrackToRemove;
{ // scope for mLock
Mutex::Autolock _l(mLock);
processConfigEvents_l();
// check exitPending here because checkForNewParameters_l() and
// checkForNewParameters_l() can temporarily release mLock
if (exitPending()) {
break;
}
// if no active track(s), then standby and release wakelock
size_t size = mActiveTracks.size();
if (size == 0) {
standbyIfNotAlreadyInStandby();
// exitPending() can't become true here
releaseWakeLock_l();
ALOGV("RecordThread: loop stopping");
// go to sleep
mWaitWorkCV.wait(mLock);
ALOGV("RecordThread: loop starting");
goto reacquire_wakelock;
}
if (mActiveTracksGen != activeTracksGen) {
activeTracksGen = mActiveTracksGen;
SortedVector<int> tmp;
for (size_t i = 0; i < size; i++) {
tmp.add(mActiveTracks[i]->uid());
}
updateWakeLockUids_l(tmp);
}
bool doBroadcast = false;
for (size_t i = 0; i < size; ) {
activeTrack = mActiveTracks[i];
if (activeTrack->isTerminated()) {
if (activeTrack->isFastTrack()) {
ALOG_ASSERT(fastTrackToRemove == 0);
fastTrackToRemove = activeTrack;
}
removeTrack_l(activeTrack);
mActiveTracks.remove(activeTrack);
mActiveTracksGen++;
size--;
continue;
}
TrackBase::track_state activeTrackState = activeTrack->mState;
switch (activeTrackState) {
case TrackBase::PAUSING:
mActiveTracks.remove(activeTrack);
mActiveTracksGen++;
doBroadcast = true;
size--;
continue;
case TrackBase::STARTING_1:
sleepUs = 10000;
i++;
continue;
case TrackBase::STARTING_2:
doBroadcast = true;
mStandby = false;
activeTrack->mState = TrackBase::ACTIVE;
break;
case TrackBase::ACTIVE:
break;
case TrackBase::IDLE:
i++;
continue;
default:
LOG_ALWAYS_FATAL("Unexpected activeTrackState %d", activeTrackState);
}
activeTracks.add(activeTrack);
i++;
if (activeTrack->isFastTrack()) {
ALOG_ASSERT(!mFastTrackAvail);
ALOG_ASSERT(fastTrack == 0);
fastTrack = activeTrack;
}
}
if (doBroadcast) {
mStartStopCond.broadcast();
}
// sleep if there are no active tracks to process
if (activeTracks.size() == 0) {
if (sleepUs == 0) {
sleepUs = kRecordThreadSleepUs;
}
continue;
}
sleepUs = 0;
lockEffectChains_l(effectChains);
}
// thread mutex is now unlocked, mActiveTracks unknown, activeTracks.size() > 0
size_t size = effectChains.size();
for (size_t i = 0; i < size; i++) {
// thread mutex is not locked, but effect chain is locked
effectChains[i]->process_l();
}
// Push a new fast capture state if fast capture is not already running, or cblk change
if (mFastCapture != 0) {
FastCaptureStateQueue *sq = mFastCapture->sq();
FastCaptureState *state = sq->begin();
bool didModify = false;
FastCaptureStateQueue::block_t block = FastCaptureStateQueue::BLOCK_UNTIL_PUSHED;
if (state->mCommand != FastCaptureState::READ_WRITE /* FIXME &&
(kUseFastMixer != FastMixer_Dynamic || state->mTrackMask > 1)*/) {
if (state->mCommand == FastCaptureState::COLD_IDLE) {
int32_t old = android_atomic_inc(&mFastCaptureFutex);
if (old == -1) {
(void) syscall(__NR_futex, &mFastCaptureFutex, FUTEX_WAKE_PRIVATE, 1);
}
}
state->mCommand = FastCaptureState::READ_WRITE;
#if 0 // FIXME
mFastCaptureDumpState.increaseSamplingN(mAudioFlinger->isLowRamDevice() ?
FastThreadDumpState::kSamplingNforLowRamDevice :
FastThreadDumpState::kSamplingN);
#endif
didModify = true;
}
audio_track_cblk_t *cblkOld = state->mCblk;
audio_track_cblk_t *cblkNew = fastTrack != 0 ? fastTrack->cblk() : NULL;
if (cblkNew != cblkOld) {
state->mCblk = cblkNew;
// block until acked if removing a fast track
if (cblkOld != NULL) {
block = FastCaptureStateQueue::BLOCK_UNTIL_ACKED;
}
didModify = true;
}
sq->end(didModify);
if (didModify) {
sq->push(block);
#if 0
if (kUseFastCapture == FastCapture_Dynamic) {
mNormalSource = mPipeSource;
}
#endif
}
}
// now run the fast track destructor with thread mutex unlocked
fastTrackToRemove.clear();
// Read from HAL to keep up with fastest client if multiple active tracks, not slowest one.
// Only the client(s) that are too slow will overrun. But if even the fastest client is too
// slow, then this RecordThread will overrun by not calling HAL read often enough.
// If destination is non-contiguous, first read past the nominal end of buffer, then
// copy to the right place. Permitted because mRsmpInBuffer was over-allocated.
int32_t rear = mRsmpInRear & (mRsmpInFramesP2 - 1);
ssize_t framesRead;
// If an NBAIO source is present, use it to read the normal capture's data
if (mPipeSource != 0) {
size_t framesToRead = mBufferSize / mFrameSize;
framesRead = mPipeSource->read((uint8_t*)mRsmpInBuffer + rear * mFrameSize,
framesToRead, AudioBufferProvider::kInvalidPTS);
if (framesRead == 0) {
// since pipe is non-blocking, simulate blocking input
sleepUs = (framesToRead * 1000000LL) / mSampleRate;
}
// otherwise use the HAL / AudioStreamIn directly
} else {
ssize_t bytesRead = mInput->stream->read(mInput->stream,
(uint8_t*)mRsmpInBuffer + rear * mFrameSize, mBufferSize);
if (bytesRead < 0) {
framesRead = bytesRead;
} else {
framesRead = bytesRead / mFrameSize;
}
/*DTS2016040508389 guoling/00349831 20160405 begin*/
HwMediaMonitorManager::saveMediaPcmData(PCM_WRITE_IN_RECORDTHREAD, (const char*)mRsmpInBuffer + rear * mFrameSize, bytesRead);
/*DTS2016040508389 guoling/00349831 20160405 end*/
}
if (/*framesRead < 0 || */(framesRead == 0 && mPipeSource == 0)) {
ALOGE("read failed: framesRead=%d", framesRead);
// Force input into standby so that it tries to recover at next read attempt
inputStandBy();
sleepUs = kRecordThreadSleepUs;
}
if (framesRead <= 0) {
goto unlock;
}
ALOG_ASSERT(framesRead > 0);
if (mTeeSink != 0) {
(void) mTeeSink->write((uint8_t*)mRsmpInBuffer + rear * mFrameSize, framesRead);
}
// If destination is non-contiguous, we now correct for reading past end of buffer.
{
size_t part1 = mRsmpInFramesP2 - rear;
if ((size_t) framesRead > part1) {
memcpy(mRsmpInBuffer, (uint8_t*)mRsmpInBuffer + mRsmpInFramesP2 * mFrameSize,
(framesRead - part1) * mFrameSize); /* [false alarm] */
}
}
rear = mRsmpInRear += framesRead;
size = activeTracks.size();
// loop over each active track
for (size_t i = 0; i < size; i++) {
activeTrack = activeTracks[i];
// skip fast tracks, as those are handled directly by FastCapture
if (activeTrack->isFastTrack()) {
continue;
}
// TODO: This code probably should be moved to RecordTrack.
// TODO: Update the activeTrack buffer converter in case of reconfigure.
enum {
OVERRUN_UNKNOWN,
OVERRUN_TRUE,
OVERRUN_FALSE
} overrun = OVERRUN_UNKNOWN;
// loop over getNextBuffer to handle circular sink
for (;;) {
activeTrack->mSink.frameCount = ~0;
status_t status = activeTrack->getNextBuffer(&activeTrack->mSink);
size_t framesOut = activeTrack->mSink.frameCount;
LOG_ALWAYS_FATAL_IF((status == OK) != (framesOut > 0));
// check available frames and handle overrun conditions
// if the record track isn't draining fast enough.
bool hasOverrun;
size_t framesIn;
activeTrack->mResamplerBufferProvider->sync(&framesIn, &hasOverrun);
if (hasOverrun) {
overrun = OVERRUN_TRUE;
}
if (framesOut == 0 || framesIn == 0) {
break;
}
// Don't allow framesOut to be larger than what is possible with resampling
// from framesIn.
// This isn't strictly necessary but helps limit buffer resizing in
// RecordBufferConverter. TODO: remove when no longer needed.
framesOut = min(framesOut,
destinationFramesPossible(
framesIn, mSampleRate, activeTrack->mSampleRate));
// process frames from the RecordThread buffer provider to the RecordTrack buffer
framesOut = activeTrack->mRecordBufferConverter->convert(
activeTrack->mSink.raw, activeTrack->mResamplerBufferProvider, framesOut);
if (framesOut > 0 && (overrun == OVERRUN_UNKNOWN)) {
overrun = OVERRUN_FALSE;
}
if (activeTrack->mFramesToDrop == 0) {
if (framesOut > 0) {
activeTrack->mSink.frameCount = framesOut;
activeTrack->releaseBuffer(&activeTrack->mSink);
}
} else {
// FIXME could do a partial drop of framesOut
if (activeTrack->mFramesToDrop > 0) {
activeTrack->mFramesToDrop -= framesOut;
if (activeTrack->mFramesToDrop <= 0) {
activeTrack->clearSyncStartEvent();
}
} else {
activeTrack->mFramesToDrop += framesOut;
if (activeTrack->mFramesToDrop >= 0 || activeTrack->mSyncStartEvent == 0 ||
activeTrack->mSyncStartEvent->isCancelled()) {
ALOGW("Synced record %s, session %d, trigger session %d",
(activeTrack->mFramesToDrop >= 0) ? "timed out" : "cancelled",
activeTrack->sessionId(),
(activeTrack->mSyncStartEvent != 0) ?
activeTrack->mSyncStartEvent->triggerSession() : 0);
activeTrack->clearSyncStartEvent();
}
}
}
if (framesOut == 0) {
break;
}
}
switch (overrun) {
case OVERRUN_TRUE:
// client isn't retrieving buffers fast enough
if (!activeTrack->setOverflow()) {
nsecs_t now = systemTime();
// FIXME should lastWarning per track?
if ((now - lastWarning) > kWarningThrottleNs) {
ALOGW("RecordThread: buffer overflow");
lastWarning = now;
}
}
break;
case OVERRUN_FALSE:
activeTrack->clearOverflow();
break;
case OVERRUN_UNKNOWN:
break;
}
}
unlock:
// enable changes in effect chain
unlockEffectChains(effectChains);
// effectChains doesn't need to be cleared, since it is cleared by destructor at scope end
}
standbyIfNotAlreadyInStandby();
{
Mutex::Autolock _l(mLock);
for (size_t i = 0; i < mTracks.size(); i++) {
sp<RecordTrack> track = mTracks[i];
track->invalidate();
}
mActiveTracks.clear();
mActiveTracksGen++;
mStartStopCond.broadcast();
}
releaseWakeLock();
ALOGV("RecordThread %p exiting", this);
ALOGI("RecordThread %p exiting", this);
return false;
}
In short, this thread runs in an endless loop servicing capture requests. When stop() is called, the track's state becomes PAUSING; the loop then removes it from the active list, broadcasts mStartStopCond, and, once no active tracks remain, goes into standby and waits, which is what actually stops the audio capture.
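The pattern itself is easy to reproduce in isolation. The following self-contained sketch (standard C++ with hypothetical names, not framework code) shows the same structure: a capture loop that waits on a condition variable when it has nothing to do, and a stop path that removes the track and signals the loop.
#include <algorithm>
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

// Minimal stand-in for the RecordThread pattern: the loop services active
// "tracks" and sleeps on a condition variable when the active list is empty;
// stopTrack() removes a track and wakes the loop so it can go idle.
class FakeRecordThread {
public:
    void start() { mWorker = std::thread(&FakeRecordThread::loop, this); }

    void addTrack(int id) {
        std::lock_guard<std::mutex> l(mLock);
        mActive.push_back(id);
        mCond.notify_all();
    }

    void stopTrack(int id) {                     // plays the role of RecordThread::stop()
        std::lock_guard<std::mutex> l(mLock);
        mActive.erase(std::remove(mActive.begin(), mActive.end(), id), mActive.end());
        mCond.notify_all();                      // like mWaitWorkCV.broadcast()
    }

    void exit() {
        {
            std::lock_guard<std::mutex> l(mLock);
            mExit = true;
        }
        mCond.notify_all();
        mWorker.join();
    }

private:
    void loop() {
        std::unique_lock<std::mutex> l(mLock);
        while (!mExit) {
            if (mActive.empty()) {
                mCond.wait(l);                   // "standby": nothing to capture
                continue;
            }
            l.unlock();                          // read from the input with the lock released,
            std::puts("read one buffer and feed the active tracks");
            std::this_thread::sleep_for(std::chrono::milliseconds(2));
            l.lock();                            // then re-check the active list
        }
    }

    std::mutex mLock;
    std::condition_variable mCond;
    std::vector<int> mActive;
    bool mExit = false;
    std::thread mWorker;
};

int main() {
    FakeRecordThread t;
    t.start();
    t.addTrack(1);
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
    t.stopTrack(1);   // the loop goes back to waiting, i.e. capture stops
    t.exit();
    return 0;
}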
3: Key Difficulties
There are two main difficulties with Media-related operations. First, the media process holds a Camera client and an AudioRecord client to handle video and audio respectively, but the real work happens in their own threads: audio on the AudioRecord/RecordThread side and video on the Camera side, with the media code reaching them over Binder. On Android 6.0 the Audio and Camera HALs still run inside the same process as the framework services (after 8.0 introduced HIDL, the HAL processes became independent, and both Audio and Camera talk to their HALs through HIDL). Second, the calls between the many classes inside the audio process are the hard part; once the relationships between these classes are clear, the call flow is easy to follow. The classes encountered in the stop flow above are roughly related as follows:
类图.png
The media server process's initialization mainly instantiates AudioFlinger, AudioPolicyService and the other services (main() in frameworks\av\media\mediaserver\main_mediaserver.cpp):
{
    // all other services
    if (doLog) {
        prctl(PR_SET_PDEATHSIG, SIGKILL);   // if parent media.log dies before me, kill me also
        setpgid(0, 0);                      // but if I die first, don't kill my parent
    }
    InitializeIcuOrDie();
    sp<ProcessState> proc(ProcessState::self());
    sp<IServiceManager> sm = defaultServiceManager();
    ALOGI("ServiceManager: %p", sm.get());
    hwservices_get_module("libhuaweiaudioalgoservice.so", "HuaweiAudioAlgoServiceInit");
    /*<DTS2014101105893 zhuxuefu 00290029 20141008 begin */
    int status = hwservices_get_module("libaudioflinger.huawei.so", "launchHwaudioflingerServices");
    ALOGI("hwservices_get_module ends...");
    if (status != 0)
    {
        AudioFlinger::instantiate();
    }
    /* DTS2014101105893 zhuxuefu 00290029 20141008 end>*/
    MediaPlayerService::instantiate();
    ResourceManagerService::instantiate();
    CameraService::instantiate();
    /*< DTS2015031908073 c00287748 20150319 begin*/
    PostCameraService::instantiate();
    /* DTS2015031908073 c00287748 20150319 end >*/
    AudioPolicyService::instantiate();
    SoundTriggerHwService::instantiate();
    RadioService::instantiate();
    status = tdservices_get_module("libextcameraservice.tdtech", "ExtCamera");
    ALOGI("hwservices_get_module ends...");
    if (status != 0)
    {
        ///
    }
    registerExtensions();
    ProcessState::self()->startThreadPool();
    IPCThreadState::self()->joinThreadPool();
}