轉載請注明出處:http://blog.csdn.net/itachi85/article/details/7216639(參考:http://blog.csdn.net/guolin_blog/article/details/42238633 、http://blog.csdn.net/itachi85/article/details/45041923)
從Android 2.0開始,Google引進了Stagefright,并在Android 2.3時用Stagefright取代了OpenCore。Stagefright在Android中是以shared library的形式存在(libstagefright.so),其中AwesomePlayer可用來播放video/audio。AwesomePlayer提供許多API,可以讓上層的應用程式(Java/JNI)來呼叫,我在這里簡單說明一下video playback的流程(采用的是android2.2的源碼)。
在Java中,若要播放一個影片,我們通常會這樣寫:
~~~
// Typical Java-side playback sequence: create a player, point it at a
// media file, synchronously prepare it, then start playback.
MediaPlayer player = new MediaPlayer();
player.setDataSource(PATH_TO_FILE);
player.prepare();
player.start();
~~~
在Stagefright中,會看到如下的處理:
1.將影片文件的絕對路徑指定給uri:
~~~
// Public entry point for setting a URI-based data source.  Takes the
// player lock, then delegates to the locked variant.
status_t AwesomePlayer::setDataSource(
        const char *uri, const KeyedVector<String8, String8> *headers) {
    Mutex::Autolock lock(mLock);

    return setDataSource_l(uri, headers);
}
// Locked implementation: after resetting any previous state it merely
// records the URI (and optional HTTP headers).  Opening the source and
// creating the extractor are deferred to finishSetDataSource_l() during
// prepare, so this call returns immediately.
status_t AwesomePlayer::setDataSource_l(
        const char *uri, const KeyedVector<String8, String8> *headers) {
    reset_l();

    mUri = uri;

    if (headers != NULL) {
        mUriHeaders = *headers;
    }

    // The actual work will be done during preparation in the call to
    // ::finishSetDataSource_l to avoid blocking the calling thread in
    // setDataSource for any significant time.
    return OK;
}
~~~
2.啟動mQueue:
~~~
// Synchronous prepare entry point: takes the player lock and delegates
// to the locked implementation.
status_t AwesomePlayer::prepare() {
    Mutex::Autolock lock(mLock);
    return prepare_l();
}
// Synchronous prepare: kicks off the asynchronous prepare machinery and
// then blocks on mPreparedCondition until onPrepareAsyncEvent() finishes.
status_t AwesomePlayer::prepare_l() {
    if (mFlags & PREPARED) {
        return OK;              // nothing to do
    }
    if (mFlags & PREPARING) {
        return UNKNOWN_ERROR;   // an async prepare is already in flight
    }

    mIsAsyncPrepare = false;

    const status_t result = prepareAsync_l();
    if (result != OK) {
        return result;
    }

    // Wait for the event-queue thread to complete preparation and
    // broadcast the condition.
    while (mFlags & PREPARING) {
        mPreparedCondition.wait(mLock);
    }

    return mPrepareResult;
}
// Asynchronous prepare entry point (backs the Java-layer prepareAsync()).
// Unlike prepare(), it returns immediately and completion is reported via
// the listener.
status_t AwesomePlayer::prepareAsync() {
    Mutex::Autolock lock(mLock);

    if (mFlags & PREPARING) {
        return UNKNOWN_ERROR;  // async prepare already pending
    }

    mIsAsyncPrepare = true;
    return prepareAsync_l();
}
// Common locked path for both prepare flavours: lazily starts the event
// queue thread, marks the player PREPARING, and posts the event that will
// run onPrepareAsyncEvent() on the queue thread.
status_t AwesomePlayer::prepareAsync_l() {
    if (mFlags & PREPARING) {
        return UNKNOWN_ERROR;  // async prepare already pending
    }

    // Start the event queue on first use only.
    if (!mQueueStarted) {
        mQueue.start();
        mQueueStarted = true;
    }

    mFlags |= PREPARING;

    mAsyncPrepareEvent = new AwesomeEvent(
            this, &AwesomePlayer::onPrepareAsyncEvent);
    mQueue.postEvent(mAsyncPrepareEvent);

    return OK;
}
~~~
3.onPrepareAsyncEvent被觸發,根據傳來文件的header來創建相應的解析器,并初始化音視頻解碼器:
~~~
void AwesomePlayer::onPrepareAsyncEvent() {
sp<Prefetcher> prefetcher;
{
Mutex::Autolock autoLock(mLock);
if (mFlags & PREPARE_CANCELLED) {
LOGI("prepare was cancelled before doing anything");
abortPrepare(UNKNOWN_ERROR);
return;
}
if (mUri.size() > 0) {
//在這個方法中創建解析器
?<strong>status_t err = finishSetDataSource_l();</strong>
if (err != OK) {
abortPrepare(err);
return;
}
}
if (mVideoTrack != NULL && mVideoSource == NULL) {
//初始化視頻解碼器
?<strong>status_t err = initVideoDecoder();</strong>
if (err != OK) {
abortPrepare(err);
return;
}
}
if (mAudioTrack != NULL && mAudioSource == NULL) {
//初始化音頻解碼器
?<strong>status_t err = initAudioDecoder();
</strong>
if (err != OK) {
abortPrepare(err);
return;
}
}
prefetcher = mPrefetcher;
}
if (prefetcher != NULL) {
{
Mutex::Autolock autoLock(mLock);
if (mFlags & PREPARE_CANCELLED) {
LOGI("prepare was cancelled before preparing the prefetcher");
prefetcher.clear();
abortPrepare(UNKNOWN_ERROR);
return;
}
}
LOGI("calling prefetcher->prepare()");
status_t result =
prefetcher->prepare(&AwesomePlayer::ContinuePreparation, this);
prefetcher.clear();
if (result == OK) {
LOGI("prefetcher is done preparing");
} else {
Mutex::Autolock autoLock(mLock);
CHECK_EQ(result, -EINTR);
LOGI("prefetcher->prepare() was cancelled early.");
abortPrepare(UNKNOWN_ERROR);
return;
}
}
Mutex::Autolock autoLock(mLock);
if (mIsAsyncPrepare) {
if (mVideoWidth < 0 || mVideoHeight < 0) {
notifyListener_l(MEDIA_SET_VIDEO_SIZE, 0, 0);
} else {
notifyListener_l(MEDIA_SET_VIDEO_SIZE, mVideoWidth, mVideoHeight);
}
notifyListener_l(MEDIA_PREPARED);
}
mPrepareResult = OK;
mFlags &= ~(PREPARING|PREPARE_CANCELLED);
mFlags |= PREPARED;
mAsyncPrepareEvent = NULL;
mPreparedCondition.broadcast();
postBufferingEvent_l();
}
~~~
~~~
// Deferred half of setDataSource(): opens the URI (over HTTP with a
// caching layer, or directly for anything else), sniffs the container to
// create a matching MediaExtractor, checks DRM rights, and finally hands
// the extractor to the track-selection overload of setDataSource_l().
// (The `<strong>` markup artifacts in the published listing removed.)
status_t AwesomePlayer::finishSetDataSource_l() {
    sp<DataSource> dataSource;

    if (!strncasecmp("http://", mUri.string(), 7)) {
        mConnectingDataSource = new HTTPDataSource(mUri, &mUriHeaders);

        // Drop the lock for the (potentially slow) network connect so a
        // concurrent reset/cancel can get in.
        mLock.unlock();
        status_t err = mConnectingDataSource->connect();
        mLock.lock();

        if (err != OK) {
            mConnectingDataSource.clear();

            LOGI("mConnectingDataSource->connect() returned %d", err);
            return err;
        }

        // Wrap the network source in a cache (64 KiB pages — NOTE(review):
        // confirm the meaning of the second argument against the
        // CachingDataSource constructor).
        dataSource = new CachingDataSource(
                mConnectingDataSource, 64 * 1024, 10);

        mConnectingDataSource.clear();
    } else {
        dataSource = DataSource::CreateFromURI(mUri.string(), &mUriHeaders);
    }

    if (dataSource == NULL) {
        return UNKNOWN_ERROR;
    }

    // Create the extractor (parser) appropriate for the container format.
    sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource);

    if (extractor == NULL) {
        return UNKNOWN_ERROR;
    }

    dataSource->getDrmInfo(&mDecryptHandle, &mDrmManagerClient);
    if (mDecryptHandle != NULL
            && RightsStatus::RIGHTS_VALID != mDecryptHandle->status) {
        // Protected content without valid rights: report the error but
        // continue preparing (matches upstream behaviour).
        notifyListener_l(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, ERROR_NO_LICENSE);
    }

    if (dataSource->flags() & DataSource::kWantsPrefetching) {
        mPrefetcher = new Prefetcher;
    }

    // A/V track selection happens in the extractor-based overload.
    return setDataSource_l(extractor);
}
~~~
4.使用extractor對文件進行A/V分離:
~~~
// A/V demux setup: walks the extractor's tracks and latches onto the
// first video track and first audio track found (matched by MIME prefix).
// Fails only when the container holds neither.
status_t AwesomePlayer::setDataSource_l(const sp<MediaExtractor> &extractor) {
    bool haveAudio = false;
    bool haveVideo = false;

    for (size_t i = 0; i < extractor->countTracks(); ++i) {
        sp<MetaData> meta = extractor->getTrackMetaData(i);

        const char *mime;
        CHECK(meta->findCString(kKeyMIMEType, &mime));

        if (!haveVideo && !strncasecmp(mime, "video/", 6)) {
            setVideoSource(extractor->getTrack(i));
            haveVideo = true;
        } else if (!haveAudio && !strncasecmp(mime, "audio/", 6)) {
            setAudioSource(extractor->getTrack(i));
            haveAudio = true;
        }

        if (haveAudio && haveVideo) {
            break;  // one of each is all playback needs
        }
    }

    if (!haveAudio && !haveVideo) {
        return UNKNOWN_ERROR;  // nothing playable in this container
    }

    mExtractorFlags = extractor->flags();

    return OK;
}
~~~
5.將解析后的音視頻數據分別交給VideoTrack和AudioTrack:
~~~
// Records the demuxed video track, routing it through the prefetcher
// when one is in use.
void AwesomePlayer::setVideoSource(sp<MediaSource> source) {
    CHECK(source != NULL);

    if (mPrefetcher != NULL) {
        source = mPrefetcher->addSource(source);
    }

    mVideoTrack = source;
}
// Records the demuxed audio track, routing it through the prefetcher
// when one is in use (mirrors setVideoSource()).  The `???` characters in
// the published listing were whitespace-encoding artifacts; removed.
void AwesomePlayer::setAudioSource(sp<MediaSource> source) {
    CHECK(source != NULL);

    if (mPrefetcher != NULL) {
        source = mPrefetcher->addSource(source);
    }

    mAudioTrack = source;
}
~~~
6.根據mVideoTrck中的編碼類型來選擇 video decoder 同理根據mAudioTrack中的編碼類型來選擇 audio decoder:
~~~
// Creates and starts the video decoder for mVideoTrack via OMXCodec,
// picking up the clip duration and video dimensions from the track's
// format metadata along the way.
status_t AwesomePlayer::initVideoDecoder() {
    mVideoSource = OMXCodec::Create(
            mClient.interface(), mVideoTrack->getFormat(),
            false, // createEncoder
            mVideoTrack);

    if (mVideoSource == NULL) {
        return UNKNOWN_ERROR;  // no decoder available for this codec
    }

    int64_t durationUs;
    if (mVideoTrack->getFormat()->findInt64(kKeyDuration, &durationUs)) {
        Mutex::Autolock autoLock(mMiscStateLock);
        // Keep the longest duration seen across tracks.
        if (mDurationUs < 0 || durationUs > mDurationUs) {
            mDurationUs = durationUs;
        }
    }

    CHECK(mVideoTrack->getFormat()->findInt32(kKeyWidth, &mVideoWidth));
    CHECK(mVideoTrack->getFormat()->findInt32(kKeyHeight, &mVideoHeight));

    status_t err = mVideoSource->start();
    if (err != OK) {
        mVideoSource.clear();
        return err;
    }

    return OK;
}
~~~
7.將mVideoEvent放入mQueue中,開始解碼播放,并交由mVideoRenderer來畫出;audio的數據則交由AudioPlayer來管理,它最終將解碼后的數據交給AudioTrack,并由AudioTrack和AudioFlinger進行交互,最終將數據交給audio HAL層,這個我們以后會做講解:
~~~
// Public play() entry point: takes the player lock and delegates to the
// locked implementation.
status_t AwesomePlayer::play() {
    Mutex::Autolock lock(mLock);
    return play_l();
}
// Locked implementation of play(): prepares on demand, starts (or
// resumes) the AudioPlayer, kicks off the video event loop, honors any
// deferred seek, and notifies the DRM agent that playback (re)started.
// (Stray `?` characters from the published listing removed; Chinese
// comments translated.)
status_t AwesomePlayer::play_l() {
    if (mFlags & PLAYING) {
        return OK;  // already playing
    }

    if (!(mFlags & PREPARED)) {
        // Callers may skip prepare(); run it synchronously on their behalf.
        status_t err = prepare_l();

        if (err != OK) {
            return err;
        }
    }

    mFlags |= PLAYING;
    mFlags |= FIRST_FRAME;

    bool deferredAudioSeek = false;

    if (mAudioSource != NULL) {
        if (mAudioPlayer == NULL) {
            if (mAudioSink != NULL) {
                // Audio data is managed by an AudioPlayer instance.
                mAudioPlayer = new AudioPlayer(mAudioSink);
                mAudioPlayer->setSource(mAudioSource);

                // We've already started the MediaSource in order to enable
                // the prefetcher to read its data.
                // AudioPlayer::start() drives the audio decoder and feeds
                // the decoded data to AudioTrack, which in turn talks to
                // AudioFlinger.
                status_t err = mAudioPlayer->start(
                        true /* sourceAlreadyStarted */);

                if (err != OK) {
                    delete mAudioPlayer;
                    mAudioPlayer = NULL;

                    mFlags &= ~(PLAYING | FIRST_FRAME);
                    return err;
                }

                // The audio clock becomes the master time source.
                delete mTimeSource;
                mTimeSource = mAudioPlayer;

                deferredAudioSeek = true;

                mWatchForAudioSeekComplete = false;
                mWatchForAudioEOS = true;
            }
        } else {
            mAudioPlayer->resume();
        }

        postCheckAudioStatusEvent_l();
    }

    if (mTimeSource == NULL && mAudioPlayer == NULL) {
        // No audio clock available; fall back to the system clock.
        mTimeSource = new SystemTimeSource;
    }

    if (mVideoSource != NULL) {
        // Kick off video playback by posting mVideoEvent onto the queue.
        postVideoEvent_l();
    }

    if (deferredAudioSeek) {
        // If there was a seek request while we were paused
        // and we're just starting up again, honor the request now.
        seekAudioIfNecessary_l();
    }

    if (mFlags & AT_EOS) {
        // Legacy behaviour, if a stream finishes playing and then
        // is started again, we play from the start...
        seekTo_l(0);
    }

    if (mDecryptHandle != NULL) {
        int64_t position;
        getPosition(&position);
        mDrmManagerClient->setPlaybackStatus(mDecryptHandle,
                Playback::START, position / 1000);
    }

    return OK;
}
?void AwesomePlayer::postVideoEvent_l(int64_t delayUs) {
??? if (mVideoEventPending) {
??????? return;
??? }
??? mVideoEventPending = true;
??? mQueue.postEventWithDelay(mVideoEvent, delayUs < 0 ? 10000 : delayUs);
}
~~~
~~~
// (Simplified excerpt.)  Driven by mVideoEvent on the queue thread: reads
// one decoded frame from the video decoder, hands it to the renderer to
// draw, then re-posts the event to schedule the next frame.
void AwesomePlayer::onVideoEvent()
{
mVideoSource->read(&mVideoBuffer, &options);
mVideoRenderer->render(mVideoBuffer);
postVideoEvent_l();
}
~~~
- 前言
- Camera源碼分析(android2.2)
- Android開機啟動流程說明
- android應用程序管理機制
- MediaPlayer框架概述(一)
- MediaPlayer框架概述(二)
- Android MediaPlayer+Stagefright框架(音頻)圖解
- Stagefright框架解讀(—)音視頻Playback流程
- Android mediaRecorder框架簡述(一)
- Android mediaRecorder框架簡述(二)
- Android IntentService淺談以及源碼分析
- Android多線程(二)AsyncTask源碼分析
- Android View體系(五)從源碼解析View的事件分發機制
- Android View體系(六)從源碼解析Activity的構成