MediaPlayer Code Analysis (1): Initialization and Setting the Data Source

Date: 2021-07-01 03:52:26

This article walks through the initialization and setDataSource flow of MediaPlayer in Android 4.4.4.


{"native_init",         "()V",                              (void *)android_media_MediaPlayer_native_init},

static void android_media_MediaPlayer_native_init(JNIEnv *env)
{
jclass clazz;

clazz = env->FindClass("android/media/MediaPlayer");
if (clazz == NULL) {
return;
}

fields.context = env->GetFieldID(clazz, "mNativeContext", "I");
if (fields.context == NULL) {
return;
}

fields.post_event = env->GetStaticMethodID(clazz, "postEventFromNative",
"(Ljava/lang/Object;IIILjava/lang/Object;)V");
if (fields.post_event == NULL) {
return;
}

fields.surface_texture = env->GetFieldID(clazz, "mNativeSurfaceTexture", "I");
if (fields.surface_texture == NULL) {
return;
}

clazz = env->FindClass("android/net/ProxyProperties");
if (clazz == NULL) {
return;
}

fields.proxyConfigGetHost =
env->GetMethodID(clazz, "getHost", "()Ljava/lang/String;");

fields.proxyConfigGetPort =
env->GetMethodID(clazz, "getPort", "()I");

fields.proxyConfigGetExclusionList =
env->GetMethodID(clazz, "getExclusionList", "()Ljava/lang/String;");
}
native_init caches the Java-layer field IDs and the callback method ID that the JNI layer will use later.
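These cached IDs are what the native side uses later to post events back to Java. A rough sketch of that path, abridged from JNIMediaPlayerListener::notify (error handling and the Parcel payload branch are omitted):

// Abridged sketch: how fields.post_event is used to call back into Java.
// mClass and mObject are the references captured in the listener's constructor.
void JNIMediaPlayerListener::notify(int msg, int ext1, int ext2, const Parcel *obj)
{
    JNIEnv *env = AndroidRuntime::getJNIEnv();
    env->CallStaticVoidMethod(mClass, fields.post_event, mObject,
                              msg, ext1, ext2, NULL);
    if (env->ExceptionCheck()) {
        // Clear any pending Java exception so it does not leak into native code.
        env->ExceptionClear();
    }
}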


{"native_setup",        "(Ljava/lang/Object;)V",            (void *)android_media_MediaPlayer_native_setup},

static void
android_media_MediaPlayer_native_setup(JNIEnv *env, jobject thiz, jobject weak_this)
{
ALOGV("native_setup");
sp<MediaPlayer> mp = new MediaPlayer();
if (mp == NULL) {
jniThrowException(env, "java/lang/RuntimeException", "Out of memory");
return;
}

// create new listener and give it to MediaPlayer
sp<JNIMediaPlayerListener> listener = new JNIMediaPlayerListener(env, thiz, weak_this);
mp->setListener(listener);

// Stow our new C++ MediaPlayer in an opaque field in the Java object.
setMediaPlayer(env, thiz, mp);
}
mp = new MediaPlayer();

MediaPlayer::MediaPlayer()
{
ALOGV("constructor");
mListener = NULL;
mCookie = NULL;
mStreamType = AUDIO_STREAM_MUSIC;
mCurrentPosition = -1;
mSeekPosition = -1;
mCurrentState = MEDIA_PLAYER_IDLE;
mPrepareSync = false;
mPrepareStatus = NO_ERROR;
mLoop = false;
mLeftVolume = mRightVolume = 1.0;
mVideoWidth = mVideoHeight = 0;
mLockThreadId = 0;
mAudioSessionId = AudioSystem::newAudioSessionId();
AudioSystem::acquireAudioSessionId(mAudioSessionId);
mSendLevel = 0;
mRetransmitEndpointValid = false;
}
mAudioSessionId = AudioSystem::newAudioSessionId() obtains a new audio session ID:

int AudioSystem::newAudioSessionId() {
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return 0;
return af->newAudioSessionId();
}
Obtain the BpBinder proxy of AudioFlinger:
AudioSystem::get_audio_flinger()

const sp<IAudioFlinger>& AudioSystem::get_audio_flinger()
{
Mutex::Autolock _l(gLock);
if (gAudioFlinger == 0) {
sp<IServiceManager> sm = defaultServiceManager();
sp<IBinder> binder;
do {
binder = sm->getService(String16("media.audio_flinger"));
if (binder != 0)
break;
ALOGW("AudioFlinger not published, waiting...");
usleep(500000); // 0.5 s
} while (true);
if (gAudioFlingerClient == NULL) {
gAudioFlingerClient = new AudioFlingerClient();
} else {
if (gAudioErrorCallback) {
gAudioErrorCallback(NO_ERROR);
}
}
binder->linkToDeath(gAudioFlingerClient);
gAudioFlinger = interface_cast<IAudioFlinger>(binder);
gAudioFlinger->registerClient(gAudioFlingerClient);
}
ALOGE_IF(gAudioFlinger==0, "no AudioFlinger!?");

return gAudioFlinger;
}

af->newAudioSessionId()

int AudioFlinger::newAudioSessionId()
{
return nextUniqueId();
}
nextUniqueId()

uint32_t AudioFlinger::nextUniqueId()
{
return android_atomic_inc(&mNextUniqueId);
}
Each new audio session gets its ID from mNextUniqueId, which is declared as follows:

volatile int32_t                    mNextUniqueId;
The volatile qualifier only makes updates to the variable visible to other threads; it does not make operations on it atomic. That is why android_atomic_inc is used to increment it atomically and avoid races between concurrent callers.
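A minimal illustration of the difference (not AOSP code; the names are made up for the example):

#include <cutils/atomic.h>   // android_atomic_inc()

static volatile int32_t gNextId = 0;   // visible to all threads, but not atomic by itself

// Racy: volatile does not make the read-modify-write atomic, so two threads
// can read the same value and hand out duplicate IDs.
static int32_t unsafeNextId() {
    return ++gNextId;
}

// Safe: android_atomic_inc() performs the increment atomically
// (it returns the value the variable held before the increment).
static int32_t safeNextId() {
    return android_atomic_inc(&gNextId);
}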

AudioSystem::acquireAudioSessionId(int audioSession)

void AudioSystem::acquireAudioSessionId(int audioSession) {
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af != 0) {
af->acquireAudioSessionId(audioSession);
}
}
af->acquireAudioSessionId(sessionid)
void AudioFlinger::acquireAudioSessionId(int audioSession)
{
Mutex::Autolock _l(mLock);
pid_t caller = IPCThreadState::self()->getCallingPid();
ALOGV("acquiring %d from %d", audioSession, caller);

// Ignore requests received from processes not known as notification client. The request
// is likely proxied by mediaserver (e.g CameraService) and releaseAudioSessionId() can be
// called from a different pid leaving a stale session reference. Also we don't know how
// to clear this reference if the client process dies.
if (mNotificationClients.indexOfKey(caller) < 0) {
ALOGV("acquireAudioSessionId() unknown client %d for session %d", caller, audioSession);
return;
}

size_t num = mAudioSessionRefs.size();
for (size_t i = 0; i< num; i++) {
AudioSessionRef *ref = mAudioSessionRefs.editItemAt(i);
if (ref->mSessionid == audioSession && ref->mPid == caller) {
ref->mCnt++;
ALOGV(" incremented refcount to %d", ref->mCnt);
return;
}
}
mAudioSessionRefs.push(new AudioSessionRef(audioSession, caller));
ALOGV(" added new entry for %d", audioSession);
}
The audio session ID ties the MediaPlayer to AudioFlinger. Each session is paired with the calling pid to form an AudioSessionRef, and the mAudioSessionRefs vector tracks the audio sessions currently alive in the system.
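For reference, each entry in mAudioSessionRefs is a small struct declared in AudioFlinger.h, roughly like this (simplified sketch):

// Simplified sketch of the entry kept in mAudioSessionRefs.
struct AudioSessionRef {
    AudioSessionRef(int sessionid, pid_t pid)
        : mSessionid(sessionid), mPid(pid), mCnt(1) {}
    const int   mSessionid;  // the audio session ID
    const pid_t mPid;        // pid of the process that acquired it
    int         mCnt;        // reference count within that process
};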

mp->setListener()

status_t MediaPlayer::setListener(const sp<MediaPlayerListener>& listener)
{
ALOGV("setListener");
Mutex::Autolock _l(mLock);
mListener = listener;
return NO_ERROR;
}
mListener is used in MediaPlayer::notify(); it delivers events and state changes from the native layer back to the Java layer.
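An abridged sketch of that path (the real MediaPlayer::notify also drives the player's state machine before forwarding):

// Abridged: after updating its own state for MEDIA_PREPARED, MEDIA_PLAYBACK_COMPLETE,
// etc., MediaPlayer::notify forwards the event through mListener, which is the
// JNIMediaPlayerListener installed in native_setup.
void MediaPlayer::notify(int msg, int ext1, int ext2, const Parcel *obj)
{
    // ... state handling omitted ...
    sp<MediaPlayerListener> listener = mListener;
    if (listener != NULL) {
        listener->notify(msg, ext1, ext2, obj);  // ends up in postEventFromNative
    }
}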

setMediaPlayer() adjusts the reference counts of the new and the old native player:

static sp<MediaPlayer> setMediaPlayer(JNIEnv* env, jobject thiz, const sp<MediaPlayer>& player)
{
Mutex::Autolock l(sLock);
sp<MediaPlayer> old = (MediaPlayer*)env->GetIntField(thiz, fields.context);
if (player.get()) {
player->incStrong((void*)setMediaPlayer);
}
if (old != 0) {
old->decStrong((void*)setMediaPlayer);
}
env->SetIntField(thiz, fields.context, (int)player.get());
return old;
}

SetIntField(thiz, fields.context, (int)player.get()) stores the newly created native player in the Java-layer field mNativeContext. On the Java side, EventHandler.handleMessage checks mNativeContext to decide whether a message really comes from the corresponding native MediaPlayer; if not, the message is ignored.
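The counterpart helper getMediaPlayer() reads that pointer back out of mNativeContext; it looks essentially like this:

// Recover the native MediaPlayer stored in the Java object's mNativeContext field.
static sp<MediaPlayer> getMediaPlayer(JNIEnv* env, jobject thiz)
{
    Mutex::Autolock l(sLock);
    MediaPlayer* const p = (MediaPlayer*)env->GetIntField(thiz, fields.context);
    return sp<MediaPlayer>(p);
}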


MediaPlayer::setDataSource()

status_t MediaPlayer::setDataSource(const sp<IStreamSource> &source)
{
ALOGV("setDataSource");
status_t err = UNKNOWN_ERROR;
const sp<IMediaPlayerService>& service(getMediaPlayerService()); // obtain the BpMediaPlayerService proxy
if (service != 0) {
sp<IMediaPlayer> player(service->create(this, mAudioSessionId)); // obtain a BpMediaPlayer
if ((NO_ERROR != doSetRetransmitEndpoint(player)) ||
(NO_ERROR != player->setDataSource(source))) {
player.clear();
}
err = attachNewPlayer(player);
}
return err;
}
getMediaPlayerService()

const sp<IMediaPlayerService>&
IMediaDeathNotifier::getMediaPlayerService()
{
ALOGV("getMediaPlayerService");
Mutex::Autolock _l(sServiceLock);
if (sMediaPlayerService == 0) {
sp<IServiceManager> sm = defaultServiceManager();
sp<IBinder> binder;
do {
binder = sm->getService(String16("media.player"));
if (binder != 0) {
break;
}
ALOGW("Media player service not published, waiting...");
usleep(500000); // 0.5 s
} while (true);

if (sDeathNotifier == NULL) {
sDeathNotifier = new DeathNotifier();
}
binder->linkToDeath(sDeathNotifier);
sMediaPlayerService = interface_cast<IMediaPlayerService>(binder);
}
ALOGE_IF(sMediaPlayerService == 0, "no media player service!?");
return sMediaPlayerService;
}
This function obtains the Bp end of MediaPlayerService from the ServiceManager; sMediaPlayerService is a BpMediaPlayerService.

Next, the sp<IMediaPlayer> player is assigned.

service->create(this, mAudioSessionId) calls create(), a member of BpMediaPlayerService:

    virtual sp<IMediaPlayer> create(
const sp<IMediaPlayerClient>& client, int audioSessionId) {
Parcel data, reply;
data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
data.writeStrongBinder(client->asBinder());
data.writeInt32(audioSessionId);

remote()->transact(CREATE, data, &reply);
return interface_cast<IMediaPlayer>(reply.readStrongBinder());
}
This function sends a CREATE transaction to BnMediaPlayerService, asking the Bn side to create a player; the caller receives a BpMediaPlayer proxy for it.

After the call travels through the Binder driver, BnMediaPlayerService receives the transaction and handles the CREATE command in onTransact:

status_t BnMediaPlayerService::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
switch (code) {
case CREATE: {
CHECK_INTERFACE(IMediaPlayerService, data, reply);
sp<IMediaPlayerClient> client =
interface_cast<IMediaPlayerClient>(data.readStrongBinder());
int audioSessionId = data.readInt32();
sp<IMediaPlayer> player = create(client, audioSessionId);
reply->writeStrongBinder(player->asBinder()); // return the player binder; the caller ends up with a BpMediaPlayer
return NO_ERROR;
} break;
After BnMediaPlayerService extracts the client (an IMediaPlayerClient) and the audioSessionId from the parcel, it calls MediaPlayerService::create:
sp<IMediaPlayer> MediaPlayerService::create(const sp<IMediaPlayerClient>& client,
int audioSessionId)
{
pid_t pid = IPCThreadState::self()->getCallingPid();
int32_t connId = android_atomic_inc(&mNextConnId);

sp<Client> c = new Client(
this, pid, connId, client, audioSessionId,
IPCThreadState::self()->getCallingUid()); // create a Client, which is a BnMediaPlayer

ALOGV("Create new client(%d) from pid %d, uid %d, ", connId, pid,
IPCThreadState::self()->getCallingUid());

wp<Client> w = c;
{
Mutex::Autolock lock(mLock);
mClients.add(w);
}
return c;
}
create() instantiates the Client class, which is a BnMediaPlayer. The resulting client is then returned through Binder, so the caller ends up holding a BpMediaPlayer proxy.

Next comes player->setDataSource(source). Since player is a BpMediaPlayer, the call is marshalled on the Bp side and handled by the corresponding BnMediaPlayer:

    status_t setDataSource(const sp<IStreamSource> &source) {
Parcel data, reply;
data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
data.writeStrongBinder(source->asBinder());
remote()->transact(SET_DATA_SOURCE_STREAM, data, &reply);
return reply.readInt32();
}
The SET_DATA_SOURCE_STREAM command is handled on the Bn side:

case SET_DATA_SOURCE_STREAM: {
CHECK_INTERFACE(IMediaPlayer, data, reply);
sp<IStreamSource> source =
interface_cast<IStreamSource>(data.readStrongBinder());
reply->writeInt32(setDataSource(source));
return NO_ERROR;
}
The setDataSource call then lands in MediaPlayerService::Client. From here on the walkthrough follows the more common file-descriptor variant, MediaPlayerService::Client::setDataSource(int fd, int64_t offset, int64_t length):

status_t MediaPlayerService::Client::setDataSource(int fd, int64_t offset, int64_t length)
{
ALOGV("setDataSource fd=%d, offset=%lld, length=%lld", fd, offset, length);
struct stat sb;
int ret = fstat(fd, &sb);
if (ret != 0) {
ALOGE("fstat(%d) failed: %d, %s", fd, ret, strerror(errno));
return UNKNOWN_ERROR;
}

if (offset >= sb.st_size) {
ALOGE("offset error");
::close(fd);
return UNKNOWN_ERROR;
}
if (offset + length > sb.st_size) {
length = sb.st_size - offset;
ALOGV("calculated length = %lld", length);
}

player_type playerType = MediaPlayerFactory::getPlayerType(this,
fd,
offset,
length);
sp<MediaPlayerBase> p = setDataSource_pre(playerType);
if (p == NULL) {
return NO_INIT;
}

// now set data source
setDataSource_post(p, p->setDataSource(fd, offset, length));
return mStatus;
}
Let us pause here and first look at the players that ship with Android and how they are registered.

When the MediaPlayerService object is created, it registers the default system players:

MediaPlayerService::MediaPlayerService()
{
ALOGV("MediaPlayerService created");
mNextConnId = 1;

...

MediaPlayerFactory::registerBuiltinFactories();
}
registerBuiltinFactories()

void MediaPlayerFactory::registerBuiltinFactories() {
Mutex::Autolock lock_(&sLock);

if (sInitComplete)
return;

registerFactory_l(new StagefrightPlayerFactory(), STAGEFRIGHT_PLAYER);
registerFactory_l(new NuPlayerFactory(), NU_PLAYER);
registerFactory_l(new SonivoxPlayerFactory(), SONIVOX_PLAYER);
registerFactory_l(new TestPlayerFactory(), TEST_PLAYER);

sInitComplete = true;
}
Here we see the four players registered by default: StagefrightPlayer, NuPlayer, SonivoxPlayer, and TestPlayer.

The player types are defined in an enum:

enum player_type {
PV_PLAYER = 1,
SONIVOX_PLAYER = 2,
STAGEFRIGHT_PLAYER = 3,
NU_PLAYER = 4,
// Test players are available only in the 'test' and 'eng' builds.
// The shared library with the test player is passed passed as an
// argument to the 'test:' url in the setDataSource call.
TEST_PLAYER = 5,
};
Since Android stopped using the OpenCore framework after 2.3 and switched to Stagefright, PVPlayer no longer exists in the code.

registerFactory_l(IFactory* factory, player_type type)

status_t MediaPlayerFactory::registerFactory_l(IFactory* factory,
player_type type) {
if (NULL == factory) {
ALOGE("Failed to register MediaPlayerFactory of type %d, factory is"
" NULL.", type);
return BAD_VALUE;
}

if (sFactoryMap.indexOfKey(type) >= 0) {
ALOGE("Failed to register MediaPlayerFactory of type %d, type is"
" already registered.", type);
return ALREADY_EXISTS;
}

if (sFactoryMap.add(type, factory) < 0) {
ALOGE("Failed to register MediaPlayerFactory of type %d, failed to add"
" to map.", type);
return UNKNOWN_ERROR;
}

return OK;
}
The factory for the given player type is added to the sFactoryMap key-value map.
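The registry itself is, roughly, a KeyedVector keyed by player_type (simplified sketch of the statics in MediaPlayerFactory.h):

// Simplified sketch of the factory registry; names abbreviated.
typedef KeyedVector<player_type, MediaPlayerFactory::IFactory*> tFactoryMap;

static Mutex       sLock;           // guards the map
static tFactoryMap sFactoryMap;     // player_type -> factory that creates it
static bool        sInitComplete;   // set once registerBuiltinFactories() has run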

Back in setDataSource, the right player for the file has to be chosen. MediaPlayerFactory::getPlayerType is implemented through a macro:

#define GET_PLAYER_TYPE_IMPL(a...)                      \
Mutex::Autolock lock_(&sLock); \
\
player_type ret = STAGEFRIGHT_PLAYER; \
float bestScore = 0.0; \
\
for (size_t i = 0; i < sFactoryMap.size(); ++i) { \
\
IFactory* v = sFactoryMap.valueAt(i); \
float thisScore; \
CHECK(v != NULL); \
thisScore = v->scoreFactory(a, bestScore); \
if (thisScore > bestScore) { \
ret = sFactoryMap.keyAt(i); \
bestScore = thisScore; \
} \
} \
\
if (0.0 == bestScore) { \
ret = getDefaultPlayerType(); \
} \
\
return ret;
Each registered factory scores the data source to decide which player suits it best; the score is essentially derived from the file content and format markers.
In most cases StagefrightPlayer is chosen, so from here on we assume player_type is STAGEFRIGHT_PLAYER.
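To make the scoring idea concrete, here is a purely illustrative sketch (not the actual AOSP factory interface) of how a factory might score a source by file extension:

#include <string.h>
#include <strings.h>

// Illustrative only: a factory returns a confidence in [0,1] for the source,
// and GET_PLAYER_TYPE_IMPL keeps the player_type with the highest score.
static float scoreByExtension(const char* url, float curScore) {
    static const float kOurScore = 0.4f;      // this factory's fixed confidence
    if (kOurScore <= curScore)
        return 0.0f;                           // another factory already scored higher
    size_t len = strlen(url);
    if (len >= 4 && !strcasecmp(url + len - 4, ".mid"))
        return kOurScore;                      // e.g. claim MIDI files
    return 0.0f;
}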

Next comes MediaPlayerService::Client::setDataSource_pre(player_type playerType):

sp<MediaPlayerBase> MediaPlayerService::Client::setDataSource_pre(
player_type playerType)
{
ALOGV("player type = %d", playerType);

// create the right type of player
sp<MediaPlayerBase> p = createPlayer(playerType);
if (p == NULL) {
return p;
}

if (!p->hardwareOutput()) {
mAudioOutput = new AudioOutput(mAudioSessionId, IPCThreadState::self()->getCallingUid());
static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput);
}

return p;
}
This function creates a player of the requested type and sets AudioOutput as its audio sink.
createPlayer(player_type):

sp<MediaPlayerBase> MediaPlayerService::Client::createPlayer(player_type playerType)
{
// determine if we have the right player type
sp<MediaPlayerBase> p = mPlayer;
if ((p != NULL) && (p->playerType() != playerType)) {
ALOGV("delete player");
p.clear();
}
if (p == NULL) {
p = MediaPlayerFactory::createPlayer(playerType, this, notify);
}

if (p != NULL) {
p->setUID(mUID);
}

return p;
}
Next, MediaPlayerFactory::createPlayer:
sp<MediaPlayerBase> MediaPlayerFactory::createPlayer(
player_type playerType,
void* cookie,
notify_callback_f notifyFunc) {
sp<MediaPlayerBase> p;
IFactory* factory;
status_t init_result;
Mutex::Autolock lock_(&sLock);

if (sFactoryMap.indexOfKey(playerType) < 0) {
ALOGE("Failed to create player object of type %d, no registered"
" factory", playerType);
return p;
}

factory = sFactoryMap.valueFor(playerType);
CHECK(NULL != factory);
p = factory->createPlayer();

if (p == NULL) {
ALOGE("Failed to create player object of type %d, create failed",
playerType);
return p;
}

init_result = p->initCheck();
if (init_result == NO_ERROR) {
p->setNotifyCallback(cookie, notifyFunc);
} else {
ALOGE("Failed to create player object of type %d, initCheck failed"
" (res = %d)", playerType, init_result);
p.clear();
}

return p;
}
It first checks whether a factory is registered for the requested type; if not, an error is logged. Otherwise the factory's createPlayer() is called:

class StagefrightPlayerFactory :
public MediaPlayerFactory::IFactory {
public:

virtual sp<MediaPlayerBase> createPlayer() {
ALOGV(" create StagefrightPlayer");
return new StagefrightPlayer();
}
};
StagefrightPlayer()

StagefrightPlayer::StagefrightPlayer()
: mPlayer(new AwesomePlayer) {
ALOGV("StagefrightPlayer");

mPlayer->setListener(this);
}
At last the real player shows its face: it is AwesomePlayer, reached after quite a few detours.

Reading StagefrightPlayer.cpp shows that StagefrightPlayer is just a thin wrapper; nearly all of the work is delegated to AwesomePlayer.

So for the rest of the analysis we skip the StagefrightPlayer file and go straight to AwesomePlayer.
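For example, StagefrightPlayer::setDataSource simply forwards to the wrapped AwesomePlayer (abridged sketch; note the fd is dup()ed so the wrapper owns its own copy):

// Abridged from StagefrightPlayer.cpp: every call is delegated to mPlayer,
// the AwesomePlayer created in the constructor.
status_t StagefrightPlayer::setDataSource(int fd, int64_t offset, int64_t length) {
    ALOGV("setDataSource(%d, %lld, %lld)", fd, offset, length);
    return mPlayer->setDataSource(dup(fd), offset, length);
}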

setDataSource_post(p, p->setDataSource(fd, offset, length));

First, p->setDataSource(), which ends up in AwesomePlayer::setDataSource:

status_t AwesomePlayer::setDataSource(
int fd, int64_t offset, int64_t length) {
Mutex::Autolock autoLock(mLock);

reset_l();

sp<DataSource> dataSource = new FileSource(fd, offset, length);

status_t err = dataSource->initCheck();

if (err != OK) {
return err;
}

mFileSource = dataSource;

{
Mutex::Autolock autoLock(mStatsLock);
mStats.mFd = fd;
mStats.mURI = String8();
}

return setDataSource_l(dataSource);
}
AwesomePlayer::setDataSource_l()

status_t AwesomePlayer::setDataSource_l(
const sp<DataSource> &dataSource) {
sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource);

if (extractor == NULL) {
return UNKNOWN_ERROR;
}

if (extractor->getDrmFlag()) {
checkDrmStatus(dataSource);
}

return setDataSource_l(extractor);
}
MediaExtractor::Create(dataSource)

sp<MediaExtractor> MediaExtractor::Create(
const sp<DataSource> &source, const char *mime) {
sp<AMessage> meta;

String8 tmp;
if (mime == NULL) {
float confidence;
if (!source->sniff(&tmp, &confidence, &meta)) {
ALOGV("FAILED to autodetect media content.");

return NULL;
}

mime = tmp.string();
ALOGV("Autodetected media content as '%s' with confidence %.2f",
mime, confidence);
}

bool isDrm = false;
// DRM MIME type syntax is "drm+type+original" where
// type is "es_based" or "container_based" and
// original is the content's cleartext MIME type
if (!strncmp(mime, "drm+", 4)) {
const char *originalMime = strchr(mime+4, '+');
if (originalMime == NULL) {
// second + not found
return NULL;
}
++originalMime;
if (!strncmp(mime, "drm+es_based+", 13)) {
// DRMExtractor sets container metadata kKeyIsDRM to 1
return new DRMExtractor(source, originalMime);
} else if (!strncmp(mime, "drm+container_based+", 20)) {
mime = originalMime;
isDrm = true;
} else {
return NULL;
}
}

MediaExtractor *ret = NULL;
if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG4)
|| !strcasecmp(mime, "audio/mp4")) {
ret = new MPEG4Extractor(source);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG)) {
ret = new MP3Extractor(source, meta);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)
|| !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_WB)) {
ret = new AMRExtractor(source);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_FLAC)) {
ret = new FLACExtractor(source);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_WAV)) {
ret = new WAVExtractor(source);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_OGG)) {
ret = new OggExtractor(source);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MATROSKA)) {
ret = new MatroskaExtractor(source);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2TS)) {
ret = new MPEG2TSExtractor(source);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_WVM)) {
// Return now. WVExtractor should not have the DrmFlag set in the block below.
return new WVMExtractor(source);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC_ADTS)) {
ret = new AACExtractor(source, meta);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2PS)) {
ret = new MPEG2PSExtractor(source);
}

if (ret != NULL) {
if (isDrm) {
ret->setDrmFlag(true);
} else {
ret->setDrmFlag(false);
}
}

return ret;
}
Because the mime argument passed in is NULL, the data source has to be sniffed to detect the MIME type.

The system registers sniffers for several default container types:

void DataSource::RegisterDefaultSniffers() {
Mutex::Autolock autoLock(gSnifferMutex);
if (gSniffersRegistered) {
return;
}

RegisterSniffer_l(SniffMPEG4);
RegisterSniffer_l(SniffMatroska);
RegisterSniffer_l(SniffOgg);
RegisterSniffer_l(SniffWAV);
RegisterSniffer_l(SniffFLAC);
RegisterSniffer_l(SniffAMR);
RegisterSniffer_l(SniffMPEG2TS);
RegisterSniffer_l(SniffMP3);
RegisterSniffer_l(SniffAAC);
RegisterSniffer_l(SniffMPEG2PS);
RegisterSniffer_l(SniffWVM);

char value[PROPERTY_VALUE_MAX];
if (property_get("drm.service.enabled", value, NULL)
&& (!strcmp(value, "1") || !strcasecmp(value, "true"))) {
RegisterSniffer_l(SniffDRM);
}
gSniffersRegistered = true;
}
The flow of the sniff function:

bool DataSource::sniff(
String8 *mimeType, float *confidence, sp<AMessage> *meta) {
*mimeType = "";
*confidence = 0.0f;
meta->clear();

{
Mutex::Autolock autoLock(gSnifferMutex);
if (!gSniffersRegistered) {
return false;
}
}

for (List<SnifferFunc>::iterator it = gSniffers.begin();
it != gSniffers.end(); ++it) {
String8 newMimeType;
float newConfidence;
sp<AMessage> newMeta;
if ((*it)(this, &newMimeType, &newConfidence, &newMeta)) {
if (newConfidence > *confidence) {
*mimeType = newMimeType;
*confidence = newConfidence;
*meta = newMeta;
}
}
}

return *confidence > 0.0;
}
Once the MIME type has been determined, the corresponding extractor (XXXExtractor) is created to parse the file.

setDataSource_l(extractor) does the following:

1 Read the overall bitrate (kKeyBitRate).

2 Pick the video track.

    2.1 Read the display width (kKeyDisplayWidth).

    2.2 Read the display height (kKeyDisplayHeight).

3 Pick the audio track.

4 Pick the subtitle (timed text) track.

status_t AwesomePlayer::setDataSource_l(const sp<MediaExtractor> &extractor) {
// Attempt to approximate overall stream bitrate by summing all
// tracks' individual bitrates, if not all of them advertise bitrate,
// we have to fail.

int64_t totalBitRate = 0;

mExtractor = extractor;
for (size_t i = 0; i < extractor->countTracks(); ++i) {
sp<MetaData> meta = extractor->getTrackMetaData(i);

int32_t bitrate;
if (!meta->findInt32(kKeyBitRate, &bitrate)) {
const char *mime;
CHECK(meta->findCString(kKeyMIMEType, &mime));
ALOGV("track of type '%s' does not publish bitrate", mime);

totalBitRate = -1;
break;
}

totalBitRate += bitrate;
}

mBitrate = totalBitRate;

ALOGV("mBitrate = %lld bits/sec", mBitrate);

{
Mutex::Autolock autoLock(mStatsLock);
mStats.mBitrate = mBitrate;
mStats.mTracks.clear();
mStats.mAudioTrackIndex = -1;
mStats.mVideoTrackIndex = -1;
}

bool haveAudio = false;
bool haveVideo = false;
for (size_t i = 0; i < extractor->countTracks(); ++i) {
sp<MetaData> meta = extractor->getTrackMetaData(i);

const char *_mime;
CHECK(meta->findCString(kKeyMIMEType, &_mime));

String8 mime = String8(_mime);

if (!haveVideo && !strncasecmp(mime.string(), "video/", 6)) {
setVideoSource(extractor->getTrack(i));
haveVideo = true;

// Set the presentation/display size
int32_t displayWidth, displayHeight;
bool success = meta->findInt32(kKeyDisplayWidth, &displayWidth);
if (success) {
success = meta->findInt32(kKeyDisplayHeight, &displayHeight);
}
if (success) {
mDisplayWidth = displayWidth;
mDisplayHeight = displayHeight;
}

{
Mutex::Autolock autoLock(mStatsLock);
mStats.mVideoTrackIndex = mStats.mTracks.size();
mStats.mTracks.push();
TrackStat *stat =
&mStats.mTracks.editItemAt(mStats.mVideoTrackIndex);
stat->mMIME = mime.string();
}
} else if (!haveAudio && !strncasecmp(mime.string(), "audio/", 6)) {
setAudioSource(extractor->getTrack(i));
haveAudio = true;
mActiveAudioTrackIndex = i;

{
Mutex::Autolock autoLock(mStatsLock);
mStats.mAudioTrackIndex = mStats.mTracks.size();
mStats.mTracks.push();
TrackStat *stat =
&mStats.mTracks.editItemAt(mStats.mAudioTrackIndex);
stat->mMIME = mime.string();
}

if (!strcasecmp(mime.string(), MEDIA_MIMETYPE_AUDIO_VORBIS)) {
// Only do this for vorbis audio, none of the other audio
// formats even support this ringtone specific hack and
// retrieving the metadata on some extractors may turn out
// to be very expensive.
sp<MetaData> fileMeta = extractor->getMetaData();
int32_t loop;
if (fileMeta != NULL
&& fileMeta->findInt32(kKeyAutoLoop, &loop) && loop != 0) {
modifyFlags(AUTO_LOOPING, SET);
}
}
} else if (!strcasecmp(mime.string(), MEDIA_MIMETYPE_TEXT_3GPP)) {
addTextSource_l(i, extractor->getTrack(i));
}
}

if (!haveAudio && !haveVideo) {
if (mWVMExtractor != NULL) {
return mWVMExtractor->getError();
} else {
return UNKNOWN_ERROR;
}
}

mExtractorFlags = extractor->flags();

return OK;
}

The main focus here is getTrack, which returns the MediaSource for a given audio or video track. Below is one example of each: MPEG4 for the video track and AMR for the audio track.

The video track

MPEG4Extractor::getTrack

sp<MediaSource> MPEG4Extractor::getTrack(size_t index) {
status_t err;
if ((err = readMetaData()) != OK) {
return NULL;
}

Track *track = mFirstTrack;
while (index > 0) {
if (track == NULL) {
return NULL;
}

track = track->next;
--index;
}

if (track == NULL) {
return NULL;
}

ALOGV("getTrack called, pssh: %d", mPssh.size());

return new MPEG4Source(
track->meta, mDataSource, track->timescale, track->sampleTable,
mSidxEntries, mMoofOffset);
}

Then comes AwesomePlayer::setVideoSource:

void AwesomePlayer::setVideoSource(sp<MediaSource> source) {
CHECK(source != NULL);

mVideoTrack = source;
}
mVideoTrack is later fed to the codec for video decoding.
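(As a preview of where mVideoTrack ends up: AwesomePlayer::initVideoDecoder hands it to OMXCodec::Create, roughly as sketched below; flag handling, secure playback and error paths are omitted.)

// Abridged sketch of AwesomePlayer::initVideoDecoder(): mVideoTrack becomes
// the input MediaSource of the video decoder built through OMXCodec.
status_t AwesomePlayer::initVideoDecoder(uint32_t flags) {
    mVideoSource = OMXCodec::Create(
            mClient.interface(),          // IOMX from the OMXClient
            mVideoTrack->getFormat(),     // track metadata: MIME, width, height, ...
            false,                        // createEncoder == false: we want a decoder
            mVideoTrack,                  // the source set in setVideoSource()
            NULL,                         // no specific component requested
            flags,
            mNativeWindow);               // render target
    return mVideoSource != NULL ? mVideoSource->start() : UNKNOWN_ERROR;
}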

audio的track

AMRExtractor::getTrack

sp<MediaSource> AMRExtractor::getTrack(size_t index) {
if (mInitCheck != OK || index != 0) {
return NULL;
}

return new AMRSource(mDataSource, mMeta, mIsWide,
mOffsetTable, mOffsetTableLength);
}
An AMRSource object is created.
Then comes AwesomePlayer::setAudioSource:

void AwesomePlayer::setAudioSource(sp<MediaSource> source) {
CHECK(source != NULL);

mAudioTrack = source;
}
mAudioTrack is later fed to the codec for audio decoding.

Finally, setDataSource_post records the status returned by the player's setDataSource:

void MediaPlayerService::Client::setDataSource_post(
const sp<MediaPlayerBase>& p,
status_t status)
{
ALOGV(" setDataSource");
mStatus = status;
if (mStatus != OK) {
ALOGE(" error: %d", mStatus);
return;
}

// Set the re-transmission endpoint if one was chosen.
if (mRetransmitEndpointValid) {
mStatus = p->setRetransmitEndpoint(&mRetransmitEndpoint);
if (mStatus != NO_ERROR) {
ALOGE("setRetransmitEndpoint error: %d", mStatus);
}
}

if (mStatus == OK) {
mPlayer = p;
}
}