Motown: ffmpeg implementations of framework 'parts'
ffmpeg_video_decoder.* is not yet functional
R=johngro@google.com
Review URL: https://codereview.chromium.org/1686363002 .
diff --git a/build/config/compiler/BUILD.gn b/build/config/compiler/BUILD.gn
index da32a26..8505a9f 100644
--- a/build/config/compiler/BUILD.gn
+++ b/build/config/compiler/BUILD.gn
@@ -914,6 +914,14 @@
cflags += default_warning_flags
cflags_cc += default_warning_flags_cc
+
+ if (is_clang) {
+ cflags += [
+ # TODO(dalesat): Remove once not broken by third party (ffmpeg).
+ # See https://github.com/domokit/mojo/issues/692.
+ "-Wno-constant-conversion",
+ ]
+ }
}
# rtti ------------------------------------------------------------------------
diff --git a/services/media/BUILD.gn b/services/media/BUILD.gn
index 969dc99..a46edc6 100644
--- a/services/media/BUILD.gn
+++ b/services/media/BUILD.gn
@@ -8,6 +8,7 @@
"//services/media/common",
"//services/media/framework",
"//services/media/framework_create",
+ "//services/media/framework_ffmpeg",
"//services/media/framework_mojo",
]
}
diff --git a/services/media/framework/parts/decoder.h b/services/media/framework/parts/decoder.h
index 579ce37..ed9e1e0 100644
--- a/services/media/framework/parts/decoder.h
+++ b/services/media/framework/parts/decoder.h
@@ -26,10 +26,6 @@
// Returns the type of the stream the decoder will produce.
virtual std::unique_ptr<StreamType> output_stream_type() = 0;
-
- protected:
- // Initializes the decoder. Called by Decoder::Create.
- virtual Result Init(const StreamType& stream_type) = 0;
};
} // namespace media
diff --git a/services/media/framework/parts/file_reader.cc b/services/media/framework/parts/file_reader.cc
index 6c68d05..623e1e4 100644
--- a/services/media/framework/parts/file_reader.cc
+++ b/services/media/framework/parts/file_reader.cc
@@ -40,7 +40,7 @@
return Result::kOk;
}
-size_t FileReader::Read(uint8* buffer, int bytes_to_read) {
+size_t FileReader::Read(uint8_t* buffer, size_t bytes_to_read) {
return fread(buffer, 1, bytes_to_read, file_);
}
@@ -48,7 +48,7 @@
return ftell(file_);
}
-int64_t FileReader::SetPosition(int64 position) {
+int64_t FileReader::SetPosition(int64_t position) {
if (fseek(file_, position, SEEK_SET) < 0) {
return -1;
}
diff --git a/services/media/framework/parts/file_reader.h b/services/media/framework/parts/file_reader.h
index 5449286..c25291c 100644
--- a/services/media/framework/parts/file_reader.h
+++ b/services/media/framework/parts/file_reader.h
@@ -22,11 +22,11 @@
// Reader implementation.
Result Init(const GURL& gurl) override;
- size_t Read(uint8* buffer, int bytes_to_read) override;
+ size_t Read(uint8_t* buffer, size_t bytes_to_read) override;
int64_t GetPosition() const override;
- int64_t SetPosition(int64 position) override;
+ int64_t SetPosition(int64_t position) override;
size_t GetSize() const override;
@@ -36,7 +36,7 @@
FileReader() {}
FILE* file_;
- int64 size_;
+ size_t size_;
};
} // namespace media
diff --git a/services/media/framework/parts/reader.h b/services/media/framework/parts/reader.h
index dc07ad8..8cf9c20 100644
--- a/services/media/framework/parts/reader.h
+++ b/services/media/framework/parts/reader.h
@@ -27,14 +27,14 @@
// Reads the given number of bytes into the buffer and returns the number of
// bytes read. Returns -1 if the operation fails.
- virtual size_t Read(uint8* buffer, int bytes_to_read) = 0;
+ virtual size_t Read(uint8_t* buffer, size_t bytes_to_read) = 0;
// Gets the current position or -1 if the operation fails.
virtual int64_t GetPosition() const = 0;
// Seeks to the given position and returns it. Returns -1 if the operation
// fails.
- virtual int64_t SetPosition(int64 position) = 0;
+ virtual int64_t SetPosition(int64_t position) = 0;
// Returns the file size. Returns -1 if the operation fails or the size isn't
// known.
diff --git a/services/media/framework_create/BUILD.gn b/services/media/framework_create/BUILD.gn
index b909fdb..f383688 100644
--- a/services/media/framework_create/BUILD.gn
+++ b/services/media/framework_create/BUILD.gn
@@ -13,5 +13,6 @@
deps = [
"//services/media/framework",
+ "//services/media/framework_ffmpeg",
]
}
diff --git a/services/media/framework_create/decoder.cc b/services/media/framework_create/decoder.cc
index 1c0fa3a..fd3c06d 100644
--- a/services/media/framework_create/decoder.cc
+++ b/services/media/framework_create/decoder.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "services/media/framework/parts/decoder.h"
+#include "services/media/framework_ffmpeg/ffmpeg_decoder.h"
namespace mojo {
namespace media {
@@ -10,7 +11,13 @@
Result Decoder::Create(
const StreamType& stream_type,
std::shared_ptr<Decoder>* decoder_out) {
- return Result::kUnsupportedOperation;
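+  // FfmpegDecoder is currently the only Decoder implementation.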
+ std::shared_ptr<Decoder> decoder;
+ Result result = FfmpegDecoder::Create(stream_type, &decoder);
+ if (result == Result::kOk) {
+ *decoder_out = decoder;
+ }
+
+ return result;
}
} // namespace media
diff --git a/services/media/framework_create/demux.cc b/services/media/framework_create/demux.cc
index c763c1b..5285b8b 100644
--- a/services/media/framework_create/demux.cc
+++ b/services/media/framework_create/demux.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "services/media/framework/parts/demux.h"
+#include "services/media/framework_ffmpeg/ffmpeg_demux.h"
namespace mojo {
namespace media {
@@ -10,7 +11,14 @@
Result Demux::Create(
std::shared_ptr<Reader> reader,
std::shared_ptr<Demux>* demux_out) {
- return Result::kUnsupportedOperation;
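+  // FfmpegDemux currently provides the only Demux implementation.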
+ std::shared_ptr<Demux> demux = FfmpegDemux::Create();
+
+ Result result = demux->Init(reader);
+ if (result == Result::kOk) {
+ *demux_out = demux;
+ }
+
+ return result;
}
} // namespace media
diff --git a/services/media/framework_ffmpeg/BUILD.gn b/services/media/framework_ffmpeg/BUILD.gn
new file mode 100644
index 0000000..8934bc5
--- /dev/null
+++ b/services/media/framework_ffmpeg/BUILD.gn
@@ -0,0 +1,60 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/module_args/mojo.gni")
+import("$mojo_sdk_root/mojo/public/mojo_sdk.gni")
+
+config("default_include_dirs") {
+ include_dirs = [
+ "//",
+ root_gen_dir,
+ "//third_party/ffmpeg",
+ ]
+
+ # TODO(dalesat): Why is this needed?
+ if (is_android) {
+ include_dirs +=
+ [ "//third_party/ffmpeg/chromium/config/Chromium/android/arm" ]
+ }
+}
+
+source_set("framework_ffmpeg") {
+ sources = [
+ "ffmpeg_audio_decoder.cc",
+ "ffmpeg_audio_decoder.h",
+ "ffmpeg_decoder.cc",
+ "ffmpeg_decoder.h",
+ "ffmpeg_decoder_base.cc",
+ "ffmpeg_decoder_base.h",
+ "ffmpeg_demux.cc",
+ "ffmpeg_demux.h",
+ "ffmpeg_formatting.cc",
+ "ffmpeg_formatting.h",
+ "ffmpeg_init.cc",
+ "ffmpeg_init.h",
+ "ffmpeg_io.cc",
+ "ffmpeg_io.h",
+ "ffmpeg_type_converters.cc",
+ "ffmpeg_type_converters.h",
+ "ffmpeg_video_decoder.cc",
+ "ffmpeg_video_decoder.h",
+ ]
+
+ deps = [
+ "//base",
+ "//mojo/common",
+ "//services/media/framework",
+ "//third_party/ffmpeg",
+ ]
+
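+ # Compile out deprecated ffmpeg APIs.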
+ defines = [
+ "FF_API_PIX_FMT_DESC=0",
+ "FF_API_OLD_DECODE_AUDIO=0",
+ "FF_API_DESTRUCT_PACKET=0",
+ "FF_API_GET_BUFFER=0",
+ ]
+
+ configs -= [ "//build/config/compiler:default_include_dirs" ]
+ configs += [ ":default_include_dirs" ]
+}
diff --git a/services/media/framework_ffmpeg/ffmpeg_audio_decoder.cc b/services/media/framework_ffmpeg/ffmpeg_audio_decoder.cc
new file mode 100644
index 0000000..a2d425e
--- /dev/null
+++ b/services/media/framework_ffmpeg/ffmpeg_audio_decoder.cc
@@ -0,0 +1,205 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "services/media/framework_ffmpeg/ffmpeg_audio_decoder.h"
+
+namespace mojo {
+namespace media {
+
+FfmpegAudioDecoder::FfmpegAudioDecoder(AvCodecContextPtr av_codec_context) :
+ FfmpegDecoderBase(std::move(av_codec_context)) {
+ DCHECK(context());
+ DCHECK(context()->channels > 0);
+
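+ // Register this decoder with the codec context so AllocateBufferForAvFrame
+ // can find it, and ask ffmpeg for reference-counted frames.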
+ context()->opaque = this;
+ context()->get_buffer2 = AllocateBufferForAvFrame;
+ context()->refcounted_frames = 1;
+
+ if (av_sample_fmt_is_planar(context()->sample_fmt)) {
+ // Prepare for interleaving.
+ stream_type_ = output_stream_type();
+ lpcm_util_ = LpcmUtil::Create(*stream_type_->lpcm());
+ }
+}
+
+FfmpegAudioDecoder::~FfmpegAudioDecoder() {}
+
+int FfmpegAudioDecoder::Decode(
+ PayloadAllocator* allocator,
+ bool* frame_decoded_out) {
+ DCHECK(allocator);
+ DCHECK(frame_decoded_out);
+ DCHECK(context());
+ DCHECK(frame());
+
+ // Use the provided allocator (for allocations in AllocateBufferForAvFrame)
+ // unless we intend to interleave later, in which case use the default
+ // allocator. We'll interleave into a buffer from the provided allocator
+ // in CreateOutputPacket.
+ allocator_ = lpcm_util_ ? PayloadAllocator::GetDefault() : allocator;
+
+ int frame_decoded = 0;
+ int input_bytes_used = avcodec_decode_audio4(
+ context().get(),
+ frame().get(),
+ &frame_decoded,
+ &packet());
+ *frame_decoded_out = frame_decoded != 0;
+
+ // We're done with this allocator.
+ allocator_ = nullptr;
+
+ return input_bytes_used;
+}
+
+PacketPtr FfmpegAudioDecoder::CreateOutputPacket(PayloadAllocator* allocator) {
+ DCHECK(allocator);
+ DCHECK(frame());
+
+ int64_t presentation_time = frame()->pts;
+ if (presentation_time == AV_NOPTS_VALUE) {
+ // TODO(dalesat): Adjust next_presentation_time_ for seek/non-zero start.
+ presentation_time = next_presentation_time_;
+ next_presentation_time_ += frame()->nb_samples;
+ }
+
+ uint64_t payload_size;
+ void* payload_buffer;
+
+ AvBufferContext* av_buffer_context =
+ reinterpret_cast<AvBufferContext*>(av_buffer_get_opaque(frame()->buf[0]));
+
+ if (lpcm_util_) {
+ // We need to interleave. The non-interleaved frames are in a buffer that
+ // was allocated from the default allocator. That buffer will get released
+ // later in ReleaseBufferForAvFrame. We need a new buffer for the
+ // interleaved frames, which we get from the provided allocator.
+ DCHECK(stream_type_);
+ DCHECK(stream_type_->lpcm());
+ payload_size = stream_type_->lpcm()->min_buffer_size(frame()->nb_samples);
+ payload_buffer = allocator->AllocatePayloadBuffer(payload_size);
+
+ lpcm_util_->Interleave(
+ av_buffer_context->buffer(),
+ av_buffer_context->size(),
+ payload_buffer,
+ frame()->nb_samples);
+ } else {
+ // We don't need to interleave. The interleaved frames are in a buffer that
+ // was allocated from the correct allocator. We take ownership of the buffer
+ // by calling Release here so that ReleaseBufferForAvFrame won't release it.
+ payload_size = av_buffer_context->size();
+ payload_buffer = av_buffer_context->Release();
+ }
+
+ return Packet::Create(
+ presentation_time,
+ frame()->nb_samples,
+ false, // The base class is responsible for end-of-stream.
+ payload_size,
+ payload_buffer,
+ allocator);
+}
+
+PacketPtr FfmpegAudioDecoder::CreateOutputEndOfStreamPacket() {
+ return Packet::CreateEndOfStream(next_presentation_time_);
+}
+
+int FfmpegAudioDecoder::AllocateBufferForAvFrame(
+ AVCodecContext* av_codec_context,
+ AVFrame* av_frame,
+ int flags) {
+ // CODEC_CAP_DR1 is required in order to do allocation this way.
+ DCHECK(av_codec_context->codec->capabilities & CODEC_CAP_DR1);
+
+ FfmpegAudioDecoder* self =
+ reinterpret_cast<FfmpegAudioDecoder*>(av_codec_context->opaque);
+ DCHECK(self);
+ DCHECK(self->allocator_);
+
+ AVSampleFormat av_sample_format =
+ static_cast<AVSampleFormat>(av_frame->format);
+
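+ // Determine the buffer size needed for all channels, honoring kChannelAlign.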
+ int buffer_size = av_samples_get_buffer_size(
+ &av_frame->linesize[0],
+ av_codec_context->channels,
+ av_frame->nb_samples,
+ av_sample_format,
+ FfmpegAudioDecoder::kChannelAlign);
+ if (buffer_size < 0) {
+ LOG(WARNING) << "av_samples_get_buffer_size failed";
+ return buffer_size;
+ }
+
+ AvBufferContext* av_buffer_context =
+ new AvBufferContext(buffer_size, self->allocator_);
+ uint8_t* buffer = av_buffer_context->buffer();
+
+ if (!av_sample_fmt_is_planar(av_sample_format)) {
+ // Samples are interleaved. There's just one buffer.
+ av_frame->data[0] = buffer;
+ } else {
+ // Samples are not interleaved. There's one buffer per channel.
+ int channels = av_codec_context->channels;
+ int bytes_per_channel = buffer_size / channels;
+ uint8_t* channel_buffer = buffer;
+
+ DCHECK(buffer != nullptr || bytes_per_channel == 0);
+
+ if (channels <= AV_NUM_DATA_POINTERS) {
+ // The buffer pointers will fit in av_frame->data.
+ DCHECK_EQ(av_frame->extended_data, av_frame->data);
+ for (int channel = 0; channel < channels; ++channel) {
+ av_frame->data[channel] = channel_buffer;
+ channel_buffer += bytes_per_channel;
+ }
+ } else {
+ // Too many channels for av_frame->data. We have to use
+ // av_frame->extended_data.
+ av_frame->extended_data = static_cast<uint8_t**>(
+ av_malloc(channels * sizeof(*av_frame->extended_data)));
+
+ // The first AV_NUM_DATA_POINTERS go in both data and extended_data.
+ int channel = 0;
+ for (; channel < AV_NUM_DATA_POINTERS; ++channel) {
+ av_frame->extended_data[channel] = av_frame->data[channel] =
+ channel_buffer;
+ channel_buffer += bytes_per_channel;
+ }
+
+ // The rest go only in extended_data.
+ for (; channel < channels; ++channel) {
+ av_frame->extended_data[channel] = channel_buffer;
+ channel_buffer += bytes_per_channel;
+ }
+ }
+ }
+
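+ // Wrap the buffer in an AVBufferRef so ReleaseBufferForAvFrame runs when the
+ // frame's reference count drops to zero.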
+ av_frame->buf[0] = av_buffer_create(
+ buffer,
+ buffer_size,
+ ReleaseBufferForAvFrame,
+ av_buffer_context,
+ 0); // flags
+
+ return 0;
+}
+
+void FfmpegAudioDecoder::ReleaseBufferForAvFrame(
+ void* opaque,
+ uint8_t* buffer) {
+ AvBufferContext* av_buffer_context =
+ reinterpret_cast<AvBufferContext*>(opaque);
+ DCHECK(av_buffer_context);
+ // Either this buffer has already been released to someone else's ownership,
+ // or it's the same as the buffer parameter.
+ DCHECK(
+ av_buffer_context->buffer() == nullptr ||
+ av_buffer_context->buffer() == buffer);
+ delete av_buffer_context;
+}
+
+} // namespace media
+} // namespace mojo
diff --git a/services/media/framework_ffmpeg/ffmpeg_audio_decoder.h b/services/media/framework_ffmpeg/ffmpeg_audio_decoder.h
new file mode 100644
index 0000000..e537b34
--- /dev/null
+++ b/services/media/framework_ffmpeg/ffmpeg_audio_decoder.h
@@ -0,0 +1,113 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_AUDIO_DECODER_H_
+#define SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_AUDIO_DECODER_H_
+
+#include "services/media/framework/lpcm_util.h"
+#include "services/media/framework_ffmpeg/ffmpeg_decoder_base.h"
+
+namespace mojo {
+namespace media {
+
+// Decoder implementation employing an ffmpeg audio decoder.
+class FfmpegAudioDecoder : public FfmpegDecoderBase {
+ public:
+ explicit FfmpegAudioDecoder(AvCodecContextPtr av_codec_context);
+
+ ~FfmpegAudioDecoder() override;
+
+ protected:
+ // FfmpegDecoderBase overrides.
+ int Decode(PayloadAllocator* allocator, bool* frame_decoded_out) override;
+
+ PacketPtr CreateOutputPacket(PayloadAllocator* allocator) override;
+
+ PacketPtr CreateOutputEndOfStreamPacket() override;
+
+ private:
+ // Used to control deallocation of buffers.
+ class AvBufferContext {
+ public:
+ AvBufferContext(size_t size, PayloadAllocator* allocator) :
+ size_(size),
+ allocator_(allocator) {
+ DCHECK(allocator_);
+ if (size_ == 0) {
+ buffer_ = nullptr;
+ } else {
+ buffer_ = static_cast<uint8_t*>(
+ allocator_->AllocatePayloadBuffer(size_));
+ }
+ }
+
+ ~AvBufferContext() {
+ if (allocator_ == nullptr) {
+ // Previously released.
+ return;
+ }
+
+ if (size_ != 0) {
+ DCHECK(buffer_ != nullptr);
+ allocator_->ReleasePayloadBuffer(size_, buffer_);
+ return;
+ }
+
+ DCHECK(buffer_ == nullptr);
+ }
+
+ uint8_t* buffer() { return buffer_; }
+
+ size_t size() { return size_; }
+
+ // Releases ownership of the buffer.
+ uint8_t* Release() {
+ DCHECK(allocator_) << "AvBufferContext released twice";
+ uint8_t* result = buffer_;
+ buffer_ = nullptr;
+ size_ = 0;
+ allocator_ = nullptr;
+ return result;
+ }
+
+ private:
+ uint8_t* buffer_;
+ size_t size_;
+ PayloadAllocator* allocator_;
+ };
+
+ // Align sample buffers on 32-byte boundaries. This is the value that Chromium
+ // uses and is supposed to work for all processor architectures. Strangely, if
+ // we were to tell ffmpeg to use the default (by passing 0), it aligns on 32
+ // sample (not byte) boundaries.
+ static const int kChannelAlign = 32;
+
+ // Callback used by the ffmpeg decoder to acquire a buffer.
+ static int AllocateBufferForAvFrame(
+ AVCodecContext* av_codec_context,
+ AVFrame* av_frame,
+ int flags);
+
+ // Callback used by the ffmpeg decoder to release a buffer.
+ static void ReleaseBufferForAvFrame(void* opaque, uint8_t* buffer);
+
+ // The allocator used by avcodec_decode_audio4 to provide context for
+ // AllocateBufferForAvFrame. This is set only during the call to
+ // avcodec_decode_audio4.
+ PayloadAllocator* allocator_;
+
+ // For interleaving, if needed.
+ std::unique_ptr<LpcmUtil> lpcm_util_;
+
+ // For interleaving, if needed.
+ std::unique_ptr<StreamType> stream_type_;
+
+ // Used to supply missing PTS.
+ int64_t next_presentation_time_ = 0;
+};
+
+} // namespace media
+} // namespace mojo
+
+#endif // SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_AUDIO_DECODER_H_
diff --git a/services/media/framework_ffmpeg/ffmpeg_decoder.cc b/services/media/framework_ffmpeg/ffmpeg_decoder.cc
new file mode 100644
index 0000000..f99dc5f
--- /dev/null
+++ b/services/media/framework_ffmpeg/ffmpeg_decoder.cc
@@ -0,0 +1,50 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "services/media/framework_ffmpeg/ffmpeg_audio_decoder.h"
+#include "services/media/framework_ffmpeg/ffmpeg_decoder.h"
+#include "services/media/framework_ffmpeg/ffmpeg_type_converters.h"
+#include "services/media/framework_ffmpeg/ffmpeg_video_decoder.h"
+
+namespace mojo {
+namespace media {
+
+Result FfmpegDecoder::Create(
+ const StreamType& stream_type,
+ std::shared_ptr<Decoder>* decoder_out) {
+ DCHECK(decoder_out);
+
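+ // Build an ffmpeg codec context describing the stream.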
+ AvCodecContextPtr av_codec_context(AVCodecContextFromStreamType(stream_type));
+ if (!av_codec_context) {
+ return Result::kUnsupportedOperation;
+ }
+
+ AVCodec* ffmpeg_decoder = avcodec_find_decoder(av_codec_context->codec_id);
+ if (ffmpeg_decoder == nullptr) {
+ return Result::kUnsupportedOperation;
+ }
+
+ int r = avcodec_open2(av_codec_context.get(), ffmpeg_decoder, nullptr);
+ if (r < 0) {
+ return Result::kUnknownError;
+ }
+
+ switch (av_codec_context->codec_type) {
+ case AVMEDIA_TYPE_AUDIO:
+ *decoder_out = std::shared_ptr<Decoder>(
+ new FfmpegAudioDecoder(std::move(av_codec_context)));
+ break;
+ case AVMEDIA_TYPE_VIDEO:
+ *decoder_out = std::shared_ptr<Decoder>(
+ new FfmpegVideoDecoder(std::move(av_codec_context)));
+ break;
+ default:
+ return Result::kUnsupportedOperation;
+ }
+
+ return Result::kOk;
+}
+
+} // namespace media
+} // namespace mojo
diff --git a/services/media/framework_ffmpeg/ffmpeg_decoder.h b/services/media/framework_ffmpeg/ffmpeg_decoder.h
new file mode 100644
index 0000000..8f316d0
--- /dev/null
+++ b/services/media/framework_ffmpeg/ffmpeg_decoder.h
@@ -0,0 +1,29 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_DECODER_H_
+#define SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_DECODER_H_
+
+#include <memory>
+
+#include "services/media/framework/parts/decoder.h"
+
+namespace mojo {
+namespace media {
+
+// Abstract base class for ffmpeg-based decoders, exposing just the static
+// Create function. The base class implementation isn't declared here, because
+// we don't want dependent targets to have to deal with ffmpeg includes.
+class FfmpegDecoder : public Decoder {
+ public:
+ // Creates an ffmpeg-based Decoder object for a given media type.
+ static Result Create(
+ const StreamType& stream_type,
+ std::shared_ptr<Decoder>* decoder_out);
+};
+
+} // namespace media
+} // namespace mojo
+
+#endif // SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_DECODER_H_
diff --git a/services/media/framework_ffmpeg/ffmpeg_decoder_base.cc b/services/media/framework_ffmpeg/ffmpeg_decoder_base.cc
new file mode 100644
index 0000000..c0cb980
--- /dev/null
+++ b/services/media/framework_ffmpeg/ffmpeg_decoder_base.cc
@@ -0,0 +1,97 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "services/media/framework_ffmpeg/ffmpeg_decoder_base.h"
+#include "services/media/framework_ffmpeg/ffmpeg_type_converters.h"
+
+namespace mojo {
+namespace media {
+
+FfmpegDecoderBase::FfmpegDecoderBase(AvCodecContextPtr av_codec_context) :
+ av_codec_context_(std::move(av_codec_context)),
+ av_frame_(av_frame_alloc()) {
+ DCHECK(av_codec_context_);
+}
+
+FfmpegDecoderBase::~FfmpegDecoderBase() {}
+
+std::unique_ptr<StreamType> FfmpegDecoderBase::output_stream_type() {
+ return StreamTypeFromAVCodecContext(*av_codec_context_);
+}
+
+void FfmpegDecoderBase::Flush() {
+ DCHECK(av_codec_context_);
+ avcodec_flush_buffers(av_codec_context_.get());
+}
+
+bool FfmpegDecoderBase::TransformPacket(
+ const PacketPtr& input,
+ bool new_input,
+ PayloadAllocator* allocator,
+ PacketPtr* output) {
+ DCHECK(input);
+ DCHECK(allocator);
+ DCHECK(output);
+
+ *output = nullptr;
+
+ if (new_input) {
+ PrepareInputPacket(input);
+ }
+
+ bool frame_decoded = false;
+ int input_bytes_used = Decode(allocator, &frame_decoded);
+ if (input_bytes_used < 0) {
+ // Decode failed.
+ return UnprepareInputPacket(input, output);
+ }
+
+ if (frame_decoded) {
+ DCHECK(allocator);
+ *output = CreateOutputPacket(allocator);
+ av_frame_unref(av_frame_.get());
+ }
+
+ CHECK(input_bytes_used <= av_packet_.size)
+ << "Ffmpeg decoder read beyond end of packet";
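+ // Advance past the portion of the input packet the decoder consumed.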
+ av_packet_.size -= input_bytes_used;
+ av_packet_.data += input_bytes_used;
+
+ if (av_packet_.size != 0 || (input->end_of_stream() && frame_decoded)) {
+ // The input packet is only partially decoded, or it's an end-of-stream
+ // packet and we're still draining. Let the caller know we want to see the
+ // input packet again.
+ return false;
+ }
+
+ // Used up the whole input packet, and, if we were draining, we're done with
+ // that too.
+ return UnprepareInputPacket(input, output);
+}
+
+void FfmpegDecoderBase::PrepareInputPacket(const PacketPtr& input) {
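+ // Point the AVPacket at the input packet's payload without copying it.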
+ av_init_packet(&av_packet_);
+ av_packet_.data = reinterpret_cast<uint8_t*>(input->payload());
+ av_packet_.size = input->size();
+}
+
+bool FfmpegDecoderBase::UnprepareInputPacket(
+ const PacketPtr& input,
+ PacketPtr* output) {
+ if (input->end_of_stream()) {
+ // Indicate end of stream. This happens when we're draining for the last
+ // time, so there should be no output packet yet.
+ DCHECK(*output == nullptr);
+ *output = CreateOutputEndOfStreamPacket();
+ }
+
+ av_packet_.size = 0;
+ av_packet_.data = nullptr;
+
+ return true;
+}
+
+} // namespace media
+} // namespace mojo
diff --git a/services/media/framework_ffmpeg/ffmpeg_decoder_base.h b/services/media/framework_ffmpeg/ffmpeg_decoder_base.h
new file mode 100644
index 0000000..833673f
--- /dev/null
+++ b/services/media/framework_ffmpeg/ffmpeg_decoder_base.h
@@ -0,0 +1,87 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_DECODER_BASE_H_
+#define SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_DECODER_BASE_H_
+
+#include "services/media/framework/parts/decoder.h"
+#include "services/media/framework_ffmpeg/ffmpeg_type_converters.h"
+extern "C" {
+#include "third_party/ffmpeg/libavcodec/avcodec.h"
+}
+
+namespace mojo {
+namespace media {
+
+// Abstract base class for ffmpeg-based decoders.
+class FfmpegDecoderBase : public Decoder {
+ public:
+ explicit FfmpegDecoderBase(AvCodecContextPtr av_codec_context);
+
+ ~FfmpegDecoderBase() override;
+
+ // Decoder implementation.
+ std::unique_ptr<StreamType> output_stream_type() override;
+
+ // Transform implementation.
+ void Flush() override;
+
+ bool TransformPacket(
+ const PacketPtr& input,
+ bool new_input,
+ PayloadAllocator* allocator,
+ PacketPtr* output) override;
+
+ protected:
+ struct AVFrameDeleter {
+ inline void operator()(AVFrame* ptr) const {
+ av_frame_free(&ptr);
+ }
+ };
+
+ // Decodes from av_packet_ into av_frame_. The result indicates how many
+ // bytes were consumed from av_packet_. *frame_decoded_out indicates whether
+ // av_frame_ contains a complete frame.
+ virtual int Decode(PayloadAllocator* allocator, bool* frame_decoded_out) = 0;
+
+ // Creates a Packet from av_frame_.
+ virtual PacketPtr CreateOutputPacket(PayloadAllocator* allocator) = 0;
+
+ // Creates an end-of-stream packet with no payload.
+ virtual PacketPtr CreateOutputEndOfStreamPacket() = 0;
+
+ protected:
+ // The ffmpeg codec context.
+ const AvCodecContextPtr& context() {
+ return av_codec_context_;
+ }
+
+ // Ffmpeg's representation of the input packet.
+ const AVPacket& packet() {
+ return av_packet_;
+ }
+
+ // Ffmpeg's representation of the output packet.
+ const std::unique_ptr<AVFrame, AVFrameDeleter>& frame() {
+ return av_frame_;
+ }
+
+ private:
+ // Prepares to process a new input packet.
+ void PrepareInputPacket(const PacketPtr& input);
+
+ // Finishes up after processing of an input packet has completed, possibly
+ // producing a zero-size end-of-stream packet. Returns true to indicate that
+ // a new input packet is required.
+ bool UnprepareInputPacket(const PacketPtr& input, PacketPtr* output);
+
+ AvCodecContextPtr av_codec_context_;
+ AVPacket av_packet_;
+ std::unique_ptr<AVFrame, AVFrameDeleter> av_frame_;
+};
+
+} // namespace media
+} // namespace mojo
+
+#endif // SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_DECODER_BASE_H_
diff --git a/services/media/framework_ffmpeg/ffmpeg_demux.cc b/services/media/framework_ffmpeg/ffmpeg_demux.cc
new file mode 100644
index 0000000..c898cc3
--- /dev/null
+++ b/services/media/framework_ffmpeg/ffmpeg_demux.cc
@@ -0,0 +1,262 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <map>
+
+#include "base/logging.h"
+#include "services/media/framework/safe_clone.h"
+#include "services/media/framework_ffmpeg/ffmpeg_demux.h"
+#include "services/media/framework_ffmpeg/ffmpeg_io.h"
+#include "services/media/framework_ffmpeg/ffmpeg_type_converters.h"
+
+namespace mojo {
+namespace media {
+
+class FfmpegDemuxImpl : public FfmpegDemux {
+ public:
+ FfmpegDemuxImpl();
+
+ ~FfmpegDemuxImpl() override;
+
+ // Demux implementation.
+ Result Init(std::shared_ptr<Reader> reader) override;
+
+ std::unique_ptr<Metadata> metadata() const override;
+
+ const std::vector<DemuxStream*>& streams() const override;
+
+ // MultistreamSource implementation.
+ size_t stream_count() const override;
+
+ PacketPtr PullPacket(size_t* stream_index_out) override;
+
+ private:
+ class FfmpegDemuxStream : public DemuxStream {
+ public:
+ FfmpegDemuxStream(const AVFormatContext& format_context, size_t index);
+
+ ~FfmpegDemuxStream() override;
+
+ // Demux::DemuxStream implementation.
+ size_t index() const override;
+
+ std::unique_ptr<StreamType> stream_type() const override;
+
+ private:
+ AVStream* stream_;
+ size_t index_;
+ std::unique_ptr<StreamType> stream_type_;
+ };
+
+ // Specialized packet implementation that wraps the AVPacket delivered by
+ // av_read_frame so its payload can be handed downstream without copying.
+ class DemuxPacket : public Packet {
+ public:
+ DemuxPacket() {
+ av_init_packet(&av_packet_);
+ }
+
+ // Packet implementation.
+ int64_t presentation_time() const override { return av_packet_.pts; }
+
+ uint64_t duration() const override { return av_packet_.duration; }
+
+ bool end_of_stream() const override { return false; }
+
+ size_t size() const override { return size_t(av_packet_.size); }
+
+ void* payload() const override {
+ return reinterpret_cast<void*>(av_packet_.data);
+ }
+
+ AVPacket& av_packet() {
+ return av_packet_;
+ }
+
+ protected:
+ ~DemuxPacket() override {
+ av_free_packet(&av_packet_);
+ }
+
+ void Release() override { delete this; }
+
+ private:
+ AVPacket av_packet_;
+ };
+
+ struct AVFormatContextDeleter {
+ inline void operator()(AVFormatContext* ptr) const {
+ avformat_free_context(ptr);
+ }
+ };
+
+ // Produces an end-of-stream packet for next_stream_to_end_.
+ PacketPtr PullEndOfStreamPacket(size_t* stream_index_out);
+
+ // Copies metadata from the specified source into map.
+ void CopyMetadata(
+ AVDictionary* source,
+ std::map<std::string, std::string>& map);
+
+ std::shared_ptr<Reader> reader_;
+ std::unique_ptr<AVFormatContext, AVFormatContextDeleter> format_context_;
+ AvioContextPtr io_context_;
+ std::vector<DemuxStream*> streams_;
+ std::unique_ptr<Metadata> metadata_;
+ int64_t next_presentation_time_;
+ int next_stream_to_end_; // -1 means don't end. streams_.size() means stop.
+};
+
+// static
+std::shared_ptr<Demux> FfmpegDemux::Create() {
+ return std::shared_ptr<Demux>(new FfmpegDemuxImpl());
+}
+
+FfmpegDemuxImpl::FfmpegDemuxImpl() : next_stream_to_end_(-1) {}
+
+FfmpegDemuxImpl::~FfmpegDemuxImpl() {}
+
+Result FfmpegDemuxImpl::Init(std::shared_ptr<Reader> reader) {
+ static constexpr uint64_t kNanosecondsPerMicrosecond = 1000;
+
+ reader_ = reader;
+
+ io_context_ = CreateAvioContext(reader.get());
+ if (!io_context_) {
+ LOG(ERROR) << "CreateAvioContext failed (allocation failure)";
+ return Result::kInternalError;
+ }
+
+ // TODO(dalesat): Consider ffmpeg util to centralize memory management.
+ AVFormatContext* format_context = avformat_alloc_context();
+ format_context->flags |= AVFMT_FLAG_CUSTOM_IO | AVFMT_FLAG_FAST_SEEK;
+ format_context->pb = io_context_.get();
+
+ // TODO(dalesat): This synchronous operation may take a long time.
+ int r = avformat_open_input(&format_context, nullptr, nullptr, nullptr);
+ format_context_.reset(format_context);
+ if (r < 0) {
+ return Result::kInternalError;
+ }
+
+ // TODO(dalesat): This synchronous operation may take a long time.
+ r = avformat_find_stream_info(format_context_.get(), nullptr);
+ if (r < 0) {
+ LOG(ERROR) << "avformat_find_stream_info failed, result " << r;
+ return Result::kInternalError;
+ }
+
+ std::map<std::string, std::string> metadata_map;
+
+ CopyMetadata(format_context_->metadata, metadata_map);
+ for (unsigned int i = 0; i < format_context_->nb_streams; i++) {
+ streams_.push_back(new FfmpegDemuxStream(*format_context_, i));
+ CopyMetadata(format_context_->streams[i]->metadata, metadata_map);
+ }
+
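+ // format_context_->duration is expressed in AV_TIME_BASE (microsecond) units.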
+ metadata_ = Metadata::Create(
+ format_context_->duration * kNanosecondsPerMicrosecond,
+ metadata_map["TITLE"],
+ metadata_map["ARTIST"],
+ metadata_map["ALBUM"],
+ metadata_map["PUBLISHER"],
+ metadata_map["GENRE"],
+ metadata_map["COMPOSER"]);
+
+ return Result::kOk;
+}
+
+std::unique_ptr<Metadata> FfmpegDemuxImpl::metadata() const {
+ return SafeClone(metadata_);
+}
+
+const std::vector<Demux::DemuxStream*>& FfmpegDemuxImpl::streams() const {
+ return streams_;
+}
+
+size_t FfmpegDemuxImpl::stream_count() const {
+ if (!format_context_) {
+ return 0;
+ }
+ return format_context_->nb_streams;
+}
+
+PacketPtr FfmpegDemuxImpl::PullPacket(size_t* stream_index_out) {
+ DCHECK(stream_index_out);
+
+ if (next_stream_to_end_ != -1) {
+ // We're producing end-of-stream packets for all the streams.
+ return PullEndOfStreamPacket(stream_index_out);
+ }
+
+ FfmpegDemuxImpl::DemuxPacket* demux_packet =
+ new FfmpegDemuxImpl::DemuxPacket();
+
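+ // Start with an empty payload; av_read_frame will allocate and fill it.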
+ demux_packet->av_packet().data = nullptr;
+ demux_packet->av_packet().size = 0;
+
+ if (av_read_frame(format_context_.get(), &demux_packet->av_packet()) < 0) {
+ // End of stream. Start producing end-of-stream packets for all the streams.
+ PacketPtr(demux_packet); // Deletes demux_packet.
+ next_stream_to_end_ = 0;
+ return PullEndOfStreamPacket(stream_index_out);
+ }
+
+ *stream_index_out =
+ static_cast<size_t>(demux_packet->av_packet().stream_index);
+ // TODO(dalesat): What if the packet has no PTS or duration?
+ next_presentation_time_ =
+ demux_packet->presentation_time() + demux_packet->duration();
+
+ return PacketPtr(demux_packet);
+}
+
+PacketPtr FfmpegDemuxImpl::PullEndOfStreamPacket(size_t* stream_index_out) {
+ DCHECK(next_stream_to_end_ >= 0);
+
+ if (static_cast<std::size_t>(next_stream_to_end_) >= streams_.size()) {
+ NOTREACHED() << "PullPacket called after all streams have ended";
+ return nullptr;
+ }
+
+ *stream_index_out = next_stream_to_end_++;
+ return Packet::CreateEndOfStream(next_presentation_time_);
+}
+
+void FfmpegDemuxImpl::CopyMetadata(
+ AVDictionary* source,
+ std::map<std::string, std::string>& map) {
+ if (source == nullptr) {
+ return;
+ }
+
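+ // Copy every entry, but don't overwrite keys that are already in the map.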
+ for (AVDictionaryEntry *entry =
+ av_dict_get(source, "", nullptr, AV_DICT_IGNORE_SUFFIX);
+ entry != nullptr;
+ entry = av_dict_get(source, "", entry, AV_DICT_IGNORE_SUFFIX)) {
+ if (map.find(entry->key) == map.end()) {
+ map.emplace(entry->key, entry->value);
+ }
+ }
+}
+
+FfmpegDemuxImpl::FfmpegDemuxStream::FfmpegDemuxStream(
+ const AVFormatContext& format_context,
+ size_t index) :
+ stream_(format_context.streams[index]), index_(index) {
+ stream_type_ = StreamTypeFromAVCodecContext(*stream_->codec);
+}
+
+FfmpegDemuxImpl::FfmpegDemuxStream::~FfmpegDemuxStream() {}
+
+size_t FfmpegDemuxImpl::FfmpegDemuxStream::index() const {
+ return index_;
+}
+
+std::unique_ptr<StreamType> FfmpegDemuxImpl::FfmpegDemuxStream::stream_type()
+ const {
+ return SafeClone(stream_type_);
+}
+
+} // namespace media
+} // namespace mojo
diff --git a/services/media/framework_ffmpeg/ffmpeg_demux.h b/services/media/framework_ffmpeg/ffmpeg_demux.h
new file mode 100644
index 0000000..281fb17
--- /dev/null
+++ b/services/media/framework_ffmpeg/ffmpeg_demux.h
@@ -0,0 +1,23 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_DEMUX_H_
+#define SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_DEMUX_H_
+
+#include <memory>
+
+#include "services/media/framework/parts/demux.h"
+
+namespace mojo {
+namespace media {
+
+class FfmpegDemux : public Demux {
+ public:
+ static std::shared_ptr<Demux> Create();
+};
+
+} // namespace media
+} // namespace mojo
+
+#endif // SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_DEMUX_H_
diff --git a/services/media/framework_ffmpeg/ffmpeg_formatting.cc b/services/media/framework_ffmpeg/ffmpeg_formatting.cc
new file mode 100644
index 0000000..2367311
--- /dev/null
+++ b/services/media/framework_ffmpeg/ffmpeg_formatting.cc
@@ -0,0 +1,805 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <ostream>
+
+#include "services/media/framework_ffmpeg/ffmpeg_formatting.h"
+extern "C" {
+#include "third_party/ffmpeg/libavformat/avformat.h"
+#include "third_party/ffmpeg/libavformat/internal.h"
+#include "third_party/ffmpeg/libavutil/dict.h"
+}
+
+namespace mojo {
+namespace media {
+
+const char* safe(const char* s) {
+ return s == nullptr ? "<nullptr>" : s;
+}
+
+std::ostream& operator<<(
+ std::ostream& os,
+ const struct AVCodecTag *const *value) {
+ if (value == nullptr) {
+ return os << "<nullptr>" << std::endl;
+ } else if (*value == nullptr) {
+ return os << "&<nullptr>" << std::endl;
+ } else {
+ os << std::endl;
+ }
+
+ os << indent;
+ os << begl << "AVCodecID id: " << (*value)->id << std::endl;
+ os << begl << "unsigned int tag: " << (*value)->tag << std::endl;
+ return os << outdent;
+}
+
+std::ostream& operator<<(std::ostream& os, const AVInputFormat *value) {
+ if (value == nullptr) {
+ return os << "<nullptr>" << std::endl;
+ } else {
+ os << std::endl;
+ }
+
+ os << indent;
+ os << begl << "const char *name: " << value->name << std::endl;
+ os << begl << "const char *long_name: " << value->long_name << std::endl;
+ os << begl << "int flags: " << AVFMTFlags(value->flags);
+ os << begl << "const char *extensions: " << safe(value->extensions)
+ << std::endl;
+ os << begl << "const AVCodecTag * const *codec_tag: " << value->codec_tag;
+ os << begl << "const char *mime_type: " << safe(value->mime_type)
+ << std::endl;
+ return os << outdent;
+}
+
+std::ostream& operator<<(std::ostream& os, const AVOutputFormat *value) {
+ if (value == nullptr) {
+ return os << "<nullptr>" << std::endl;
+ } else {
+ os << std::endl;
+ }
+
+ os << indent;
+ os << begl << "const char *name: " << safe(value->name) << std::endl;
+ os << begl << "const char *long_name: " << safe(value->long_name)
+ << std::endl;
+ os << begl << "const char *mime_type: " << safe(value->mime_type)
+ << std::endl;
+ os << begl << "const char *extensions: " << safe(value->extensions)
+ << std::endl;
+ os << begl << "AVCodecID audio_codec: " << value->audio_codec;
+ os << begl << "AVCodecID video_codec: " << value->video_codec;
+ os << begl << "AVCodecID subtitle_codec: " << value->subtitle_codec;
+ os << begl << "int flags: " << AVFMTFlags(value->flags);
+ os << begl << "const AVCodecTag * const *codec_tag: " << value->codec_tag;
+ return os << outdent;
+}
+
+std::ostream& operator<<(std::ostream& os, const AVIOContext *value) {
+ if (value == nullptr) {
+ return os << "<nullptr>" << std::endl;
+ } else {
+ return os << "TODO" << std::endl;
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, AVFMTCTXFlags value) {
+ if (value.flags_ == 0) {
+ return os << "<none>" << std::endl;
+ }
+
+ if (value.flags_ & AVFMTCTX_NOHEADER) {
+ return os << "AVFMTCTX_NOHEADER" << std::endl;
+ } else {
+ return os << "<UNKNOWN AVFMTCTX_: " << value.flags_ << ">" << std::endl;
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, const AVRational *value) {
+ if (value == nullptr) {
+ return os << "<none>" << std::endl;
+ } else {
+ os << std::endl;
+ }
+
+ os << indent;
+ for (int index = 0; value->num != 0 || value->den != 0; ++value, ++index) {
+ os << begl << "[" << index << "]: " << *value;
+ }
+ return os << outdent;
+}
+
+std::ostream& operator<<(std::ostream& os, const int *value) {
+ if (value == nullptr) {
+ return os << "<none>" << std::endl;
+ } else {
+ os << std::endl;
+ }
+
+ os << indent;
+ for (int index = 0; *value != 0; ++value, ++index) {
+ os << begl << "[" << index << "]: " << *value << std::endl;
+ }
+ return os << outdent;
+}
+
+std::ostream& operator<<(std::ostream& os, const uint64_t *value) {
+ if (value == nullptr) {
+ return os << "<none>" << std::endl;
+ } else {
+ os << std::endl;
+ }
+
+ os << indent;
+ for (int index = 0; *value != 0; ++value, ++index) {
+ os << begl << "[" << index << "]: " << *value << std::endl;
+ }
+ return os << outdent;
+}
+
+std::ostream& operator<<(std::ostream& os, const AVSampleFormat *value) {
+ if (value == nullptr) {
+ return os << "<none>" << std::endl;
+ } else {
+ os << std::endl;
+ }
+
+ os << indent;
+ for (int index = 0; int(*value) != 0; ++value, ++index) {
+ os << begl << "[" << index << "]: " << *value;
+ }
+ return os << outdent;
+}
+
+std::ostream& operator<<(std::ostream& os, const AVCodec *value) {
+ if (value == nullptr) {
+ return os << "<nullptr>" << std::endl;
+ } else {
+ os << std::endl;
+ }
+
+ os << indent;
+ os << begl << "const char *name: " << safe(value->name) << std::endl;
+ os << begl << "const char *long_name: " << safe(value->long_name)
+ << std::endl;
+ os << begl << "AVMediaType type: " << value->type;
+ os << begl << "AVCodecID id: " << value->id;
+ os << begl << "int capabilities: " << value->capabilities << std::endl;
+ os << begl << "AVRational *supported_framerates: "
+ << value->supported_framerates;
+ os << begl << "const int *supported_samplerates: "
+ << value->supported_samplerates;
+ os << begl << "const AVSampleFormat *sample_fmts: " << value->sample_fmts;
+ os << begl << "const uint64_t *channel_layouts: " << value->channel_layouts;
+
+ return os << outdent;
+}
+
+std::ostream& operator<<(std::ostream& os, const AVCodecContext *value) {
+ if (value == nullptr) {
+ return os << "<nullptr>" << std::endl;
+ } else {
+ os << std::endl;
+ }
+
+ os << indent;
+ os << begl << "AVMediaType codec_type: " << value->codec_type;
+ os << begl << "const struct AVCodec *codec: " << value->codec;
+ os << begl << "AVCodecID codec_id: " << value->codec_id;
+ os << begl << "int bit_rate: " << value->bit_rate << std::endl;
+ os << begl << "int extradata_size: " << value->extradata_size << std::endl;
+ os << begl << "int width: " << value->width << std::endl;
+ os << begl << "int height: " << value->height << std::endl;
+ os << begl << "int coded_width: " << value->coded_width << std::endl;
+ os << begl << "int coded_height: " << value->coded_height << std::endl;
+ os << begl << "int gop_size: " << value->gop_size << std::endl;
+ os << begl << "int sample_rate: " << value->sample_rate << std::endl;
+ os << begl << "int channels: " << value->channels << std::endl;
+ os << begl << "AVSampleFormat sample_fmt: " << value->sample_fmt;
+ os << begl << "int frame_size: " << value->frame_size << std::endl;
+ os << begl << "int frame_number: " << value->frame_number << std::endl;
+ os << begl << "int block_align: " << value->block_align << std::endl;
+ os << begl << "int cutoff: " << value->cutoff << std::endl;
+ os << begl << "uint64_t channel_layout: " << value->channel_layout
+ << std::endl;
+ os << begl << "uint64_t request_channel_layout: "
+ << value->request_channel_layout << std::endl;
+ os << begl << "AVAudioServiceType audio_service_type: "
+ << value->audio_service_type << std::endl;
+ os << begl << "AVSampleFormat request_sample_fmt: "
+ << value->request_sample_fmt;
+ os << begl << "int profile: " << value->profile << std::endl;
+ return os << outdent;
+}
+
+std::ostream& operator<<(std::ostream& os, const AVRational& value) {
+ return os << value.num << "/" << value.den << std::endl;
+}
+
+std::ostream& operator<<(std::ostream& os, const AVStream *value) {
+ if (value == nullptr) {
+ return os << "<nullptr>" << std::endl;
+ } else {
+ os << std::endl;
+ }
+
+ os << indent;
+ os << begl << "int index: " << value->index << std::endl;
+ os << begl << "int id: " << value->id << std::endl;
+ os << begl << "AVCodecContext *codec: " << value->codec;
+ os << begl << "AVRational time_base: " << value->time_base;
+ os << begl << "int64_t start_time: " << value->start_time << std::endl;
+ os << begl << "int64_t duration: " << value->duration << std::endl;
+ os << begl << "int64_t nb_frames: " << value->nb_frames << std::endl;
+ os << begl << "int disposition: " << AV_DISPOSITIONFlags(value->disposition);
+ os << begl << "AVDiscard discard: " << value->discard;
+ os << begl << "AVRational sample_aspect_ratio: "
+ << value->sample_aspect_ratio;
+ os << begl << "AVDictionary *metadata: " << value->metadata;
+ os << begl << "AVRational avg_frame_rate: " << value->avg_frame_rate;
+ os << begl << "AVPacket attached_pic: " << &value->attached_pic;
+ os << begl << "int nb_side_data: " << value->nb_side_data << std::endl;
+ os << begl << "AVPacketSideData side_data: " <<
+ AVPacketSideDataArray(value->side_data, value->nb_side_data);
+ os << begl << "int event_flags: " << AVSTREAM_EVENTFlags(value->event_flags);
+ return os << outdent;
+}
+
+std::ostream& operator<<(std::ostream& os, const AVStreamArray& value) {
+ if (value.items_ == nullptr) {
+ return os << "<nullptr>" << std::endl;
+ } else if (value.count_ == 0) {
+ return os << "<empty>" << std::endl;
+ } else {
+ os << std::endl;
+ }
+
+ os << indent;
+ for (unsigned int i = 0; i < value.count_; i++) {
+ os << begl << "[" << i << "] " << value.items_[i];
+ }
+ return os << outdent;
+}
+
+std::ostream& operator<<(std::ostream& os, AVFMTFlags value) {
+ if (value.flags_ == 0) {
+ os << "<none>" << std::endl;
+ return os;
+ } else {
+ os << std::endl;
+ }
+
+ os << indent;
+ if (value.flags_ & AVFMT_FLAG_GENPTS) {
+ os << begl << "AVFMT_FLAG_GENPTS" << std::endl;
+ }
+ if (value.flags_ & AVFMT_FLAG_IGNIDX) {
+ os << begl << "AVFMT_FLAG_IGNIDX" << std::endl;
+ }
+ if (value.flags_ & AVFMT_FLAG_NONBLOCK) {
+ os << begl << "AVFMT_FLAG_NONBLOCK" << std::endl;
+ }
+ if (value.flags_ & AVFMT_FLAG_IGNDTS) {
+ os << begl << "AVFMT_FLAG_IGNDTS" << std::endl;
+ }
+ if (value.flags_ & AVFMT_FLAG_NOFILLIN) {
+ os << begl << "AVFMT_FLAG_NOFILLIN" << std::endl;
+ }
+ if (value.flags_ & AVFMT_FLAG_NOPARSE) {
+ os << begl << "AVFMT_FLAG_NOPARSE" << std::endl;
+ }
+ if (value.flags_ & AVFMT_FLAG_NOBUFFER) {
+ os << begl << "AVFMT_FLAG_NOBUFFER" << std::endl;
+ }
+ if (value.flags_ & AVFMT_FLAG_CUSTOM_IO) {
+ os << begl << "AVFMT_FLAG_CUSTOM_IO" << std::endl;
+ }
+ if (value.flags_ & AVFMT_FLAG_DISCARD_CORRUPT) {
+ os << begl << "AVFMT_FLAG_DISCARD_CORRUPT" << std::endl;
+ }
+ if (value.flags_ & AVFMT_FLAG_FLUSH_PACKETS) {
+ os << begl << "AVFMT_FLAG_FLUSH_PACKETS" << std::endl;
+ }
+ if (value.flags_ & AVFMT_FLAG_BITEXACT) {
+ os << begl << "AVFMT_FLAG_BITEXACT" << std::endl;
+ }
+ if (value.flags_ & AVFMT_FLAG_MP4A_LATM) {
+ os << begl << "AVFMT_FLAG_MP4A_LATM" << std::endl;
+ }
+ if (value.flags_ & AVFMT_FLAG_SORT_DTS) {
+ os << begl << "AVFMT_FLAG_SORT_DTS" << std::endl;
+ }
+ if (value.flags_ & AVFMT_FLAG_PRIV_OPT) {
+ os << begl << "AVFMT_FLAG_PRIV_OPT" << std::endl;
+ }
+ if (value.flags_ & AVFMT_FLAG_KEEP_SIDE_DATA) {
+ os << begl << "AVFMT_FLAG_KEEP_SIDE_DATA" << std::endl;
+ }
+ if (value.flags_ & AVFMT_FLAG_FAST_SEEK) {
+ os << begl << "AVFMT_FLAG_FAST_SEEK" << std::endl;
+ }
+ return os << outdent;
+}
+
+std::ostream& operator<<(std::ostream& os, AV_DISPOSITIONFlags value) {
+ if (value.flags_ == 0) {
+ os << "<none>" << std::endl;
+ return os;
+ } else {
+ os << std::endl;
+ }
+
+ os << indent;
+ if (value.flags_ & AV_DISPOSITION_DEFAULT) {
+ os << begl << "AV_DISPOSITION_DEFAULT 0x0001" << std::endl;
+ }
+ if (value.flags_ & AV_DISPOSITION_DUB) {
+ os << begl << "AV_DISPOSITION_DUB 0x0002" << std::endl;
+ }
+ if (value.flags_ & AV_DISPOSITION_ORIGINAL) {
+ os << begl << "AV_DISPOSITION_ORIGINAL 0x0004" << std::endl;
+ }
+ if (value.flags_ & AV_DISPOSITION_COMMENT) {
+ os << begl << "AV_DISPOSITION_COMMENT 0x0008" << std::endl;
+ }
+ if (value.flags_ & AV_DISPOSITION_LYRICS) {
+ os << begl << "AV_DISPOSITION_LYRICS 0x0010" << std::endl;
+ }
+ if (value.flags_ & AV_DISPOSITION_KARAOKE) {
+ os << begl << "AV_DISPOSITION_KARAOKE 0x0020" << std::endl;
+ }
+ return os << outdent;
+}
+
+std::ostream& operator<<(std::ostream& os, const AVBufferRef *value) {
+ if (value == nullptr) {
+ return os << "<nullptr>" << std::endl;
+ } else {
+ os << std::endl;
+ }
+
+ os << indent;
+ os << begl << "AVBuffer *buffer: "
+ << (value->buffer == nullptr ? "<nullptr>" : "TODO") << std::endl;
+ os << begl << "uint8_t *data: "
+ << (value->data == nullptr ? "<nullptr>" : "<opaque>") << std::endl;
+ os << begl << "int size: " << value->size << std::endl;
+ return os << outdent;
+}
+
+std::ostream& operator<<(std::ostream& os, const AVFrame *value) {
+ if (value == nullptr) {
+ return os << "<nullptr>" << std::endl;
+ } else {
+ os << std::endl;
+ }
+
+ os << indent;
+ os << begl << "uint8_t *data[AV_NUM_DATA_POINTERS]: ";
+ {
+ os << indent;
+ bool any = false;
+ for (int i = 0; i < AV_NUM_DATA_POINTERS; i++) {
+ if (value->data[i] != nullptr) {
+ if (!any) {
+ any = true;
+ os << std::endl;
+ }
+ os << begl << "[" << i << "]: <opaque>" << std::endl;
+ }
+ }
+ if (!any) {
+ os << "<all nullptr>" << std::endl;
+ }
+ os << outdent;
+ }
+
+ os << begl << "int linesize[AV_NUM_DATA_POINTERS]: ";
+ {
+ os << indent;
+ bool any = false;
+ for (int i = 0; i < AV_NUM_DATA_POINTERS; i++) {
+ if (value->linesize[i] != 0) {
+ if (!any) {
+ any = true;
+ os << std::endl;
+ }
+ os << begl << "[" << i << "]: " << value->linesize[i] << std::endl;
+ }
+ }
+ if (!any) {
+ os << "<all zero>" << std::endl;
+ }
+ os << outdent;
+ }
+
+ os << begl << "uint8_t **extended_data: "
+ << (value->extended_data == nullptr ? "<nullptr>" : "<opaque>")
+ << std::endl;
+ os << begl << "int width: " << value->width << std::endl;
+ os << begl << "int height: " << value->height << std::endl;
+ os << begl << "int nb_samples: " << value->nb_samples << std::endl;
+ os << begl << "int format: " << value->format << std::endl;
+ os << begl << "int key_frame: " << value->key_frame << std::endl;
+ os << begl << "int64_t pts: " << value->pts << std::endl;
+ os << begl << "int64_t pkt_pts: " << value->pkt_pts << std::endl;
+ os << begl << "int64_t pkt_dts: " << value->pkt_dts << std::endl;
+ os << begl << "int sample_rate: " << value->sample_rate << std::endl;
+ os << begl << "AVBufferRef *buf[AV_NUM_DATA_POINTERS]: ";
+ {
+ os << indent;
+ bool any = false;
+ for (int i = 0; i < AV_NUM_DATA_POINTERS; i++) {
+ if (value->buf[i] != nullptr) {
+ if (!any) {
+ any = true;
+ os << std::endl;
+ }
+ os << begl << "[" << i << "]:" << value->buf[i];
+ }
+ }
+ if (!any) {
+ os << "<all nullptr>" << std::endl;
+ }
+ os << outdent;
+ }
+ os << begl << "int channels: " << value->channels << std::endl;
+ os << begl << "int pkt_size: " << value->pkt_size << std::endl;
+ return os << outdent;
+}
+
+std::ostream& operator<<(std::ostream& os, const AVPacket *value) {
+ if (value == nullptr) {
+ return os << "<nullptr>" << std::endl;
+ } else {
+ os << std::endl;
+ }
+
+ os << indent;
+ os << begl << "AVBufferRef *buf: " << value->buf;
+ os << begl << "int64_t pts: " << value->pts << std::endl;
+ os << begl << "int64_t dts: " << value->dts << std::endl;
+ os << begl << "uint8_t *data: "
+ << (value->data == nullptr ? "<nullptr>" : "<opaque>") << std::endl;
+ os << begl << "int size: " << value->size << std::endl;
+ os << begl << "int stream_index: " << value->stream_index << std::endl;
+ os << begl << "int flags: " << value->flags << std::endl;
+ os << begl << "AVPacketSideData *side_data: " << value->side_data;
+ os << begl << "int side_data_elems: " << value->side_data_elems << std::endl;
+ os << begl << "int duration: " << value->duration << std::endl;
+ os << begl << "int64_t pos: " << value->pos << std::endl;
+ os << begl << "int64_t convergence_duration: "
+ << value->convergence_duration << std::endl;
+ return os << outdent;
+}
+
+std::ostream& operator<<(std::ostream& os, const AVPacketSideData *value) {
+ if (value == nullptr) {
+ return os << "<nullptr>" << std::endl;
+ } else {
+ return os << "TODO" << std::endl;
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, const AVPacketSideDataArray& value) {
+ if (value.items_ == nullptr) {
+ return os << "<nullptr>" << std::endl;
+ } else if (value.count_ == 0) {
+ return os << "<empty>" << std::endl;
+ } else {
+ os << std::endl;
+ }
+
+ os << indent;
+ for (unsigned int i = 0; i < value.count_; i++) {
+ os << begl << "[" << i << "] " << &value.items_[i];
+ }
+ return os << outdent;
+}
+
+std::ostream& operator<<(std::ostream& os, const AVProgram *value) {
+ if (value == nullptr) {
+ return os << "<nullptr>" << std::endl;
+ } else {
+ return os << "TODO" << std::endl;
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, const AVProgramArray& value) {
+ if (value.items_ == nullptr) {
+ return os << "<nullptr>" << std::endl;
+ } else if (value.count_ == 0) {
+ return os << "<empty>" << std::endl;
+ } else {
+ os << std::endl;
+ }
+
+ os << indent;
+ for (unsigned int i = 0; i < value.count_; i++) {
+ os << begl << "[" << i << "]" << value.items_[i];
+ }
+ return os << outdent;
+}
+
+std::ostream& operator<<(std::ostream& os, const AVChapter *value) {
+ if (value == nullptr) {
+ return os << "<nullptr>" << std::endl;
+ } else {
+ return os << "TODO" << std::endl;
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, const AVChapterArray& value) {
+ if (value.items_ == nullptr) {
+ return os << "<nullptr>" << std::endl;
+ } else if (value.count_ == 0) {
+ return os << "<empty>" << std::endl;
+ } else {
+ os << std::endl;
+ }
+
+ os << indent;
+ for (unsigned int i = 0; i < value.count_; i++) {
+ os << begl << "[" << i << "]" << value.items_[i];
+ }
+ return os << outdent;
+}
+
+std::ostream& operator<<(std::ostream& os, AVCodecID value) {
+ return os << avcodec_get_name(value) << " (" << static_cast<int>(value) << ")"
+ << std::endl;
+}
+
+std::ostream& operator<<(std::ostream& os, const AVDictionary *value) {
+ if (value == nullptr) {
+ return os << "<nullptr>" << std::endl;
+ }
+ AVDictionaryEntry *entry =
+ av_dict_get(value, "", nullptr, AV_DICT_IGNORE_SUFFIX);
+ if (entry == nullptr) {
+ return os << "<empty>" << std::endl;
+ }
+ os << std::endl;
+
+ os << indent;
+ while (entry != nullptr) {
+ os << begl << safe(entry->key) << ": " << safe(entry->value) << std::endl;
+ entry = av_dict_get(value, "", entry, AV_DICT_IGNORE_SUFFIX);
+ }
+ return os << outdent;
+}
+
+std::ostream& operator<<(std::ostream& os, AVFMT_EVENTFlags value) {
+ if (value.flags_ == 0) {
+ os << "<none>" << std::endl;
+ return os;
+ }
+
+ if (value.flags_ & AVFMT_EVENT_FLAG_METADATA_UPDATED) {
+ return os << "AVFMT_EVENT_FLAG_METADATA_UPDATED" << std::endl;
+ } else {
+ return os << "<UNKNOWN AVFMT_EVENT_FLAG_: " << value.flags_ << ">"
+ << std::endl;
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, AVSTREAM_EVENTFlags value) {
+ if (value.flags_ == 0) {
+ os << "<none>" << std::endl;
+ return os;
+ }
+
+ if (value.flags_ & AVSTREAM_EVENT_FLAG_METADATA_UPDATED) {
+ return os << "AVSTREAM_EVENT_FLAG_METADATA_UPDATED" << std::endl;
+ } else {
+ return os << "<UNKNOWN AVSTREAM_EVENT_FLAG_: " << value.flags_ << ">"
+ << std::endl;
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, AVFMT_AVOID_NEG_TSFlags value) {
+ switch (value.flags_) {
+ case AVFMT_AVOID_NEG_TS_AUTO:
+ return os << "AVFMT_AVOID_NEG_TS_AUTO" << std::endl;
+ case AVFMT_AVOID_NEG_TS_MAKE_NON_NEGATIVE:
+ return os << "AVFMT_AVOID_NEG_TS_MAKE_NON_NEGATIVE" << std::endl;
+ case AVFMT_AVOID_NEG_TS_MAKE_ZERO:
+ return os << "AVFMT_AVOID_NEG_TS_MAKE_ZERO" << std::endl;
+ default:
+ return os << "<UNKNOWN AVFMT_AVOID_NEG_TS_: " << value.flags_ << ">"
+ << std::endl;
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, AVMediaType value) {
+ switch (value) {
+ case AVMEDIA_TYPE_UNKNOWN:
+ return os << "AVMEDIA_TYPE_UNKNOWN" << std::endl;
+ case AVMEDIA_TYPE_VIDEO:
+ return os << "AVMEDIA_TYPE_VIDEO" << std::endl;
+ case AVMEDIA_TYPE_AUDIO:
+ return os << "AVMEDIA_TYPE_AUDIO" << std::endl;
+ case AVMEDIA_TYPE_DATA:
+ return os << "AVMEDIA_TYPE_DATA" << std::endl;
+ case AVMEDIA_TYPE_SUBTITLE:
+ return os << "AVMEDIA_TYPE_SUBTITLE" << std::endl;
+ case AVMEDIA_TYPE_ATTACHMENT:
+ return os << "AVMEDIA_TYPE_ATTACHMENT" << std::endl;
+ case AVMEDIA_TYPE_NB:
+ return os << "AVMEDIA_TYPE_NB" << std::endl;
+ default:
+ return os << "<UNKNOWN AVMediaType: " << static_cast<int>(value) << ">"
+ << std::endl;
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, AVSampleFormat value) {
+ switch (value) {
+ case AV_SAMPLE_FMT_NONE:
+ return os << "AV_SAMPLE_FMT_NONE" << std::endl;
+ case AV_SAMPLE_FMT_U8:
+ return os << "AV_SAMPLE_FMT_U8" << std::endl;
+ case AV_SAMPLE_FMT_S16:
+ return os << "AV_SAMPLE_FMT_S16" << std::endl;
+ case AV_SAMPLE_FMT_S32:
+ return os << "AV_SAMPLE_FMT_S32" << std::endl;
+ case AV_SAMPLE_FMT_FLT:
+ return os << "AV_SAMPLE_FMT_FLT" << std::endl;
+ case AV_SAMPLE_FMT_DBL:
+ return os << "AV_SAMPLE_FMT_DBL" << std::endl;
+ case AV_SAMPLE_FMT_U8P:
+ return os << "AV_SAMPLE_FMT_U8P" << std::endl;
+ case AV_SAMPLE_FMT_S16P:
+ return os << "AV_SAMPLE_FMT_S16P" << std::endl;
+ case AV_SAMPLE_FMT_S32P:
+ return os << "AV_SAMPLE_FMT_S32P" << std::endl;
+ case AV_SAMPLE_FMT_FLTP:
+ return os << "AV_SAMPLE_FMT_FLTP" << std::endl;
+ case AV_SAMPLE_FMT_DBLP:
+ return os << "AV_SAMPLE_FMT_DBLP" << std::endl;
+ case AV_SAMPLE_FMT_NB:
+ return os << "AV_SAMPLE_FMT_NB" << std::endl;
+ default:
+ return os << "<UNKNOWN AVSampleFormat: " << static_cast<int>(value) << ">"
+ << std::endl;
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, AVColorSpace value) {
+ switch (value) {
+ case AVCOL_SPC_RGB:
+ return os << "AVCOL_SPC_RGB" << std::endl;
+ case AVCOL_SPC_BT709:
+ return os << "AVCOL_SPC_BT709" << std::endl;
+ case AVCOL_SPC_UNSPECIFIED:
+ return os << "AVCOL_SPC_UNSPECIFIED" << std::endl;
+ case AVCOL_SPC_RESERVED:
+ return os << "AVCOL_SPC_RESERVED" << std::endl;
+ case AVCOL_SPC_FCC:
+ return os << "AVCOL_SPC_FCC" << std::endl;
+ case AVCOL_SPC_BT470BG:
+ return os << "AVCOL_SPC_BT470BG" << std::endl;
+ case AVCOL_SPC_SMPTE170M:
+ return os << "AVCOL_SPC_SMPTE170M" << std::endl;
+ case AVCOL_SPC_SMPTE240M:
+ return os << "AVCOL_SPC_SMPTE240M" << std::endl;
+ case AVCOL_SPC_YCOCG:
+ return os << "AVCOL_SPC_YCOCG" << std::endl;
+ case AVCOL_SPC_BT2020_NCL:
+ return os << "AVCOL_SPC_BT2020_NCL" << std::endl;
+ case AVCOL_SPC_BT2020_CL:
+ return os << "AVCOL_SPC_BT2020_CL" << std::endl;
+ case AVCOL_SPC_NB:
+ return os << "AVCOL_SPC_NB" << std::endl;
+ default:
+ return os << "<UNKNOWN AVColorSpace: " << static_cast<int>(value) << ">"
+ << std::endl;
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, enum AVDiscard value) {
+ switch (value) {
+ case AVDISCARD_NONE:
+ return os << "AVDISCARD_NONE" << std::endl;
+ case AVDISCARD_DEFAULT:
+ return os << "AVDISCARD_DEFAULT" << std::endl;
+ case AVDISCARD_NONREF:
+ return os << "AVDISCARD_NONREF" << std::endl;
+ case AVDISCARD_BIDIR:
+ return os << "AVDISCARD_BIDIR" << std::endl;
+ case AVDISCARD_NONINTRA:
+ return os << "AVDISCARD_NONINTRA" << std::endl;
+ case AVDISCARD_NONKEY:
+ return os << "AVDISCARD_NONKEY" << std::endl;
+ case AVDISCARD_ALL:
+ return os << "AVDISCARD_ALL" << std::endl;
+ default:
+ return os << "<UNKNOWN AVDISCARD_: " << static_cast<int>(value) << ">"
+ << std::endl;
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, AVDurationEstimationMethod value) {
+ switch (value) {
+ case AVFMT_DURATION_FROM_PTS:
+ return os << "AVFMT_DURATION_FROM_PTS" << std::endl;
+ case AVFMT_DURATION_FROM_STREAM:
+ return os << "AVFMT_DURATION_FROM_STREAM" << std::endl;
+ case AVFMT_DURATION_FROM_BITRATE:
+ return os << "AVFMT_DURATION_FROM_BITRATE" << std::endl;
+ default:
+ return os << "<UNKNOWN AVDurationEstimationMethod: "
+ << static_cast<int>(value) << ">" << std::endl;
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, const AVFormatContext *value) {
+ if (value == nullptr) {
+ return os << "<nullptr>" << std::endl;
+ } else {
+ os << std::endl;
+ }
+
+ os << indent;
+ os << begl << "AVInputFormat *iformat: " << value->iformat;
+ os << begl << "AVOutputFormat *oformat: " << value->oformat;
+ os << begl << "AVIOContext *pb: " << value->pb;
+ os << begl << "int ctx_flags: " << AVFMTCTXFlags(value->ctx_flags);
+ os << begl << "unsigned int nb_streams: " << value->nb_streams << std::endl;
+ os << begl << "AVStream **streams: "
+ << AVStreamArray(value->streams, value->nb_streams);
+ os << begl << "char filename[1024]: " << value->filename << std::endl;
+ os << begl << "int64_t start_time: " << value->start_time << std::endl;
+ os << begl << "int64_t duration: " << value->duration << std::endl;
+ os << begl << "int64_t bit_rate: " << value->bit_rate << std::endl;
+ os << begl << "unsigned int packet_size: " << value->packet_size << std::endl;
+ os << begl << "int max_delay: " << value->max_delay << std::endl;
+ os << begl << "int flags: " << AVFMTFlags(value->flags);
+ os << begl << "int64_t probesize: " << value->probesize << std::endl;
+ os << begl << "unsigned int nb_programs: " << value->nb_programs << std::endl;
+ os << begl << "AVProgram **programs: "
+ << AVProgramArray(value->programs, value->nb_programs);
+ os << begl << "AVCodecID video_codec_id: " << value->video_codec_id;
+ os << begl << "AVCodecID audio_codec_id: " << value->audio_codec_id;
+ os << begl << "AVCodecID subtitle_codec_id: " << value->subtitle_codec_id;
+ os << begl << "unsigned int max_index_size: "
+ << value->max_index_size << std::endl;
+ os << begl << "unsigned int max_picture_buffer: "
+ << value->max_picture_buffer << std::endl;
+ os << begl << "unsigned int nb_chapters: " << value->nb_chapters << std::endl;
+ os << begl << "AVChapter **chapters: "
+ << AVChapterArray(value->chapters, value->nb_chapters);
+ os << begl << "AVDictionary *metadata: " << value->metadata;
+ os << begl << "int64_t start_time_realtime: " << value->start_time_realtime
+ << std::endl;
+ os << begl << "int fps_probe_size: " << value->fps_probe_size << std::endl;
+ os << begl << "int error_recognition: "
+ << value->error_recognition << std::endl;
+ os << begl << "int64_t max_interleave_delta: "
+ << value->max_interleave_delta << std::endl;
+ os << begl << "int strict_std_compliance: "
+ << value->strict_std_compliance << std::endl;
+ os << begl << "int event_flags: " << AVFMT_EVENTFlags(value->flags);
+ os << begl << "int max_ts_probe: " << value->max_ts_probe << std::endl;
+ os << begl << "int avoid_negative_ts: "
+ << AVFMT_AVOID_NEG_TSFlags(value->avoid_negative_ts);
+ os << begl << "int ts_id: " << value->ts_id << std::endl;
+ os << begl << "int audio_preload: " << value->audio_preload << std::endl;
+ os << begl << "int max_chunk_duration: "
+ << value->max_chunk_duration << std::endl;
+ os << begl << "int max_chunk_size: " << value->max_chunk_size << std::endl;
+ os << begl << "int use_wallclock_as_timestamps: "
+ << value->use_wallclock_as_timestamps << std::endl;
+ os << begl << "int avio_flags: " << value->avio_flags << std::endl;
+ os << begl << "AVDurationEstimationMethod duration_estimation_method: "
+ << value->duration_estimation_method;
+ os << begl << "int64_t skip_initial_bytes: " << value->skip_initial_bytes
+ << std::endl;
+ os << begl << "TODO(dalesat): more" << std::endl;
+ return os << outdent;
+}
+
+} // namespace media
+} // namespace mojo
diff --git a/services/media/framework_ffmpeg/ffmpeg_formatting.h b/services/media/framework_ffmpeg/ffmpeg_formatting.h
new file mode 100644
index 0000000..05b1ebe
--- /dev/null
+++ b/services/media/framework_ffmpeg/ffmpeg_formatting.h
@@ -0,0 +1,116 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_FORMATTING_H_
+#define SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_FORMATTING_H_
+
+#include <ostream>
+
+#include "services/media/framework/formatting.h"
+extern "C" {
+#include "third_party/ffmpeg/libavformat/avformat.h"
+}
+
+namespace mojo {
+namespace media {
+
+// See services/media/framework/formatting.h for details.
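+//
+// Example (sketch; 'format_context' is an assumed AVFormatContext*):
+//   std::cout << "AVFormatContext: " << format_context;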
+
+std::ostream& operator<<(
+ std::ostream& os,
+ const struct AVCodecTag *const *value);
+std::ostream& operator<<(std::ostream& os, const AVInputFormat *value);
+std::ostream& operator<<(std::ostream& os, const AVOutputFormat *value);
+std::ostream& operator<<(std::ostream& os, const AVIOContext *value);
+std::ostream& operator<<(std::ostream& os, const AVCodecContext *value);
+std::ostream& operator<<(std::ostream& os, const AVCodec *value);
+std::ostream& operator<<(std::ostream& os, const AVRational& value);
+std::ostream& operator<<(std::ostream& os, const AVStream *value);
+std::ostream& operator<<(std::ostream& os, const AVBufferRef *value);
+std::ostream& operator<<(std::ostream& os, const AVFrame *value);
+std::ostream& operator<<(std::ostream& os, const AVPacket *value);
+std::ostream& operator<<(std::ostream& os, const AVPacketSideData *value);
+std::ostream& operator<<(std::ostream& os, const AVProgram *value);
+std::ostream& operator<<(std::ostream& os, const AVChapter *value);
+std::ostream& operator<<(std::ostream& os, AVCodecID value);
+std::ostream& operator<<(std::ostream& os, const AVDictionary *value);
+std::ostream& operator<<(std::ostream& os, enum AVDiscard value);
+std::ostream& operator<<(std::ostream& os, AVDurationEstimationMethod value);
+std::ostream& operator<<(std::ostream& os, const AVFormatContext *value);
+std::ostream& operator<<(std::ostream& os, AVMediaType value);
+std::ostream& operator<<(std::ostream& os, AVSampleFormat value);
+std::ostream& operator<<(std::ostream& os, AVColorSpace value);
+
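+// Wrappers that pair a raw ffmpeg array pointer with its element count so the
+// arrays can be printed with the operator<< overloads below.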
+struct AVPacketSideDataArray {
+ AVPacketSideDataArray(const AVPacketSideData *items, unsigned int count) :
+ items_(items), count_(count) {}
+ const AVPacketSideData *items_;
+ unsigned int count_;
+};
+std::ostream& operator<<(std::ostream& os, const AVPacketSideDataArray& value);
+
+struct AVProgramArray {
+ AVProgramArray(AVProgram **items, unsigned int count) :
+ items_(items), count_(count) {}
+ AVProgram **items_;
+ unsigned int count_;
+};
+std::ostream& operator<<(std::ostream& os, const AVProgramArray& value);
+
+struct AVChapterArray {
+ AVChapterArray(AVChapter **items, unsigned int count) :
+ items_(items), count_(count) {}
+ AVChapter **items_;
+ unsigned int count_;
+};
+std::ostream& operator<<(std::ostream& os, const AVChapterArray& value);
+
+struct AVStreamArray {
+ AVStreamArray(AVStream **items, unsigned int count) :
+ items_(items), count_(count) {}
+ AVStream **items_;
+ unsigned int count_;
+};
+std::ostream& operator<<(std::ostream& os, const AVStreamArray& value);
+
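+// Wrappers that give plain int flag fields distinct types so the operator<<
+// overloads below can decode them symbolically.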
+struct AVFMTFlags {
+ AVFMTFlags(int flags) : flags_(flags) {}
+ int flags_;
+};
+std::ostream& operator<<(std::ostream& os, AVFMTFlags value);
+
+struct AVFMTCTXFlags {
+ AVFMTCTXFlags(int flags) : flags_(flags) {}
+ int flags_;
+};
+std::ostream& operator<<(std::ostream& os, AVFMTCTXFlags value);
+
+struct AV_DISPOSITIONFlags {
+ AV_DISPOSITIONFlags(int flags) : flags_(flags) {}
+ int flags_;
+};
+std::ostream& operator<<(std::ostream& os, AV_DISPOSITIONFlags value);
+
+struct AVFMT_EVENTFlags {
+ AVFMT_EVENTFlags(int flags) : flags_(flags) {}
+ int flags_;
+};
+std::ostream& operator<<(std::ostream& os, AVFMT_EVENTFlags value);
+
+struct AVSTREAM_EVENTFlags {
+ AVSTREAM_EVENTFlags(int flags) : flags_(flags) {}
+ int flags_;
+};
+std::ostream& operator<<(std::ostream& os, AVSTREAM_EVENTFlags value);
+
+struct AVFMT_AVOID_NEG_TSFlags {
+ AVFMT_AVOID_NEG_TSFlags(int flags) : flags_(flags) {}
+ int flags_;
+};
+std::ostream& operator<<(std::ostream& os, AVFMT_AVOID_NEG_TSFlags value);
+
+} // namespace media
+} // namespace mojo
+
+#endif // SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_FORMATTING_H_
diff --git a/services/media/framework_ffmpeg/ffmpeg_init.cc b/services/media/framework_ffmpeg/ffmpeg_init.cc
new file mode 100644
index 0000000..4ce9e62
--- /dev/null
+++ b/services/media/framework_ffmpeg/ffmpeg_init.cc
@@ -0,0 +1,27 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/lock.h"
+#include "services/media/framework_ffmpeg/ffmpeg_init.h"
+
+extern "C" {
+#include "third_party/ffmpeg/libavformat/avformat.h"
+}
+
+namespace mojo {
+namespace media {
+
+void InitFfmpeg() {
+ static base::Lock lock_;
+ static bool initialized_ = false;
+
+ base::AutoLock lock(lock_);
+ if (!initialized_) {
+ initialized_ = true;
+ av_register_all();
+ }
+}
+
+} // namespace media
+} // namespace mojo
diff --git a/services/media/framework_ffmpeg/ffmpeg_init.h b/services/media/framework_ffmpeg/ffmpeg_init.h
new file mode 100644
index 0000000..7de4223
--- /dev/null
+++ b/services/media/framework_ffmpeg/ffmpeg_init.h
@@ -0,0 +1,16 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_INIT_H_
+#define SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_INIT_H_
+
+namespace mojo {
+namespace media {
+
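+// Ensures av_register_all() has been called. Safe to call from any thread
+// and any number of times; registration happens at most once.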
+void InitFfmpeg();
+
+} // namespace media
+} // namespace mojo
+
+#endif // SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_INIT_H_
diff --git a/services/media/framework_ffmpeg/ffmpeg_io.cc b/services/media/framework_ffmpeg/ffmpeg_io.cc
new file mode 100644
index 0000000..c15c614
--- /dev/null
+++ b/services/media/framework_ffmpeg/ffmpeg_io.cc
@@ -0,0 +1,99 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "services/media/framework/parts/reader.h"
+#include "services/media/framework_ffmpeg/ffmpeg_init.h"
+#include "services/media/framework_ffmpeg/ffmpeg_io.h"
+extern "C" {
+#include "third_party/ffmpeg/libavformat/avio.h"
+}
+
+namespace mojo {
+namespace media {
+
+static int AvioRead(void* opaque, uint8_t* buf, int buf_size);
+static int64_t AvioSeek(void* opaque, int64_t offset, int whence);
+
+AvioContextPtr CreateAvioContext(Reader* reader) {
+ // Internal buffer size used by AVIO for reading.
+ const int kBufferSize = 32 * 1024;
+
+ InitFfmpeg();
+
+ AVIOContext* result = avio_alloc_context(
+ static_cast<unsigned char*>(av_malloc(kBufferSize)),
+ kBufferSize,
+ 0, // write_flag
+ reader, // opaque
+ &AvioRead,
+ nullptr,
+ &AvioSeek);
+
+ // Ensure FFmpeg only tries to seek when we know how.
+ result->seekable = reader->CanSeek() ? AVIO_SEEKABLE_NORMAL : 0;
+
+ // Ensure writing is disabled.
+ result->write_flag = 0;
+
+ return AvioContextPtr(result);
+}
+
+// Performs a read operation using the signature required for avio.
+static int AvioRead(void* opaque, uint8_t* buf, int buf_size) {
+ Reader* reader = reinterpret_cast<Reader*>(opaque);
+ int result = reader->Read(buf, buf_size);
+ if (result < 0) {
+ result = AVERROR(EIO);
+ }
+ return result;
+}
+
+// Performs a seek operation using the signature required for avio.
+static int64_t AvioSeek(void* opaque, int64_t offset, int whence) {
+ Reader* reader = reinterpret_cast<Reader*>(opaque);
+
+ if (whence == AVSEEK_SIZE) {
+ int64_t result = reader->GetSize();
+ if (result == -1) {
+ return AVERROR(EIO);
+ }
+ return result;
+ }
+
+ int64_t base;
+ switch (whence) {
+ case SEEK_SET:
+ base = 0;
+ break;
+
+ case SEEK_CUR:
+ base = reader->GetPosition();
+ if (base == -1) {
+ return AVERROR(EIO);
+ }
+ break;
+
+ case SEEK_END:
+ base = reader->GetSize();
+ if (base == -1) {
+ return AVERROR(EIO);
+ }
+ break;
+
+ default:
+ NOTREACHED();
+ return AVERROR(EIO);
+ }
+
+ int64_t result = reader->SetPosition(base + offset);
+ if (result == -1) {
+ return AVERROR(EIO);
+ }
+
+ return result;
+}
+
+} // namespace media
+} // namespace mojo
diff --git a/services/media/framework_ffmpeg/ffmpeg_io.h b/services/media/framework_ffmpeg/ffmpeg_io.h
new file mode 100644
index 0000000..81c1a8c
--- /dev/null
+++ b/services/media/framework_ffmpeg/ffmpeg_io.h
@@ -0,0 +1,31 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_IO_H_
+#define SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_IO_H_
+
+#include "services/media/framework/parts/reader.h"
+extern "C" {
+#include "third_party/ffmpeg/libavformat/avio.h"
+}
+
+namespace mojo {
+namespace media {
+
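+// Deleter for AVIOContexts created by CreateAvioContext; frees the context's
+// internal buffer as well as the context itself.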
+struct AVIOContextDeleter {
+ void operator()(AVIOContext* context) const {
+ av_free(context->buffer);
+ av_free(context);
+ }
+};
+
+using AvioContextPtr = std::unique_ptr<AVIOContext, AVIOContextDeleter>;
+
+// Creates an ffmpeg avio_context for a given reader.
+AvioContextPtr CreateAvioContext(Reader* reader);
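+//
+// A minimal usage sketch (hypothetical call site; 'reader' and
+// 'format_context' are assumptions, not part of this interface):
+//   AvioContextPtr avio_context = CreateAvioContext(reader);
+//   AVFormatContext* format_context = avformat_alloc_context();
+//   format_context->pb = avio_context.get();
+//   avformat_open_input(&format_context, nullptr, nullptr, nullptr);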
+
+} // namespace media
+} // namespace mojo
+
+#endif // SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_IO_H_
diff --git a/services/media/framework_ffmpeg/ffmpeg_type_converters.cc b/services/media/framework_ffmpeg/ffmpeg_type_converters.cc
new file mode 100644
index 0000000..9b421ec
--- /dev/null
+++ b/services/media/framework_ffmpeg/ffmpeg_type_converters.cc
@@ -0,0 +1,385 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "services/media/framework_ffmpeg/ffmpeg_type_converters.h"
+extern "C" {
+#include "third_party/ffmpeg/libavformat/avformat.h"
+}
+
+// FFmpeg defines a PixelFormat macro; undefine it to avoid collisions.
+#undef PixelFormat
+
+namespace mojo {
+namespace media {
+
+namespace {
+
+// Converts an AVSampleFormat into an LpcmStreamType::SampleFormat.
+LpcmStreamType::SampleFormat Convert(AVSampleFormat av_sample_format) {
+ switch (av_sample_format) {
+ case AV_SAMPLE_FMT_U8:
+ case AV_SAMPLE_FMT_U8P:
+ return LpcmStreamType::SampleFormat::kUnsigned8;
+ case AV_SAMPLE_FMT_S16:
+ case AV_SAMPLE_FMT_S16P:
+ return LpcmStreamType::SampleFormat::kSigned16;
+ case AV_SAMPLE_FMT_S32:
+ case AV_SAMPLE_FMT_S32P:
+ return LpcmStreamType::SampleFormat::kSigned24In32;
+ case AV_SAMPLE_FMT_FLT:
+ case AV_SAMPLE_FMT_FLTP:
+ return LpcmStreamType::SampleFormat::kFloat;
+ case AV_SAMPLE_FMT_NONE:
+ case AV_SAMPLE_FMT_DBL:
+ case AV_SAMPLE_FMT_DBLP:
+ case AV_SAMPLE_FMT_NB:
+ default:
+ NOTREACHED() << "unsupported av_sample_format " << av_sample_format;
+ return LpcmStreamType::SampleFormat::kUnknown;
+ }
+}
+
+// Copies a buffer from Bytes into context->extradata. The result is malloc'ed
+// and must be freed.
+void ExtraDataFromBytes(const Bytes& bytes, const AvCodecContextPtr& context) {
+ size_t byte_count = bytes.size();
+ uint8_t* copy = reinterpret_cast<uint8_t*>(malloc(byte_count));
+ std::memcpy(copy, bytes.data(), byte_count);
+ context->extradata = copy;
+ context->extradata_size = byte_count;
+}
+
+// Creates a StreamType from an AVCodecContext describing an LPCM type.
+std::unique_ptr<StreamType> StreamTypeFromLpcmCodecContext(
+ const AVCodecContext& from) {
+ return LpcmStreamType::Create(
+ Convert(from.sample_fmt),
+ from.channels,
+ from.sample_rate);
+}
+
+// Creates a StreamType from an AVCodecContext describing a compressed audio
+// type.
+std::unique_ptr<StreamType>
+StreamTypeFromCompressedAudioCodecContext(const AVCodecContext& from) {
+ CompressedAudioStreamType::AudioEncoding encoding;
+ switch (from.codec_id) {
+ case CODEC_ID_VORBIS:
+ encoding = CompressedAudioStreamType::AudioEncoding::kVorbis;
+ break;
+ default:
+ encoding = CompressedAudioStreamType::AudioEncoding::kUnknown;
+ break;
+ }
+
+ return CompressedAudioStreamType::Create(
+ encoding,
+ Convert(from.sample_fmt),
+ from.channels,
+ from.sample_rate,
+ from.extradata_size == 0 ?
+ nullptr :
+ Bytes::Create(from.extradata, from.extradata_size));
+}
+
+// Converts AVColorSpace and AVColorRange to ColorSpace.
+VideoStreamType::ColorSpace ColorSpaceFromAVColorSpaceAndRange(
+ AVColorSpace color_space,
+ AVColorRange color_range) {
+ // TODO(dalesat): Blindly copied from Chromium.
+ if (color_range == AVCOL_RANGE_JPEG) {
+ return VideoStreamType::ColorSpace::kJpeg;
+ }
+
+ switch (color_space) {
+ case AVCOL_SPC_UNSPECIFIED:
+ return VideoStreamType::ColorSpace::kNotApplicable;
+ case AVCOL_SPC_BT709:
+ return VideoStreamType::ColorSpace::kHdRec709;
+ case AVCOL_SPC_SMPTE170M:
+ case AVCOL_SPC_BT470BG:
+ return VideoStreamType::ColorSpace::kSdRec601;
+ default:
+ return VideoStreamType::ColorSpace::kUnknown;
+ }
+}
+
+// Converts VideoProfile to an ffmpeg profile.
+int FfmpegProfileFromVideoProfile(VideoStreamType::VideoProfile video_profile) {
+ // TODO(dalesat): Blindly copied from Chromium.
+ switch (video_profile) {
+ case VideoStreamType::VideoProfile::kH264Baseline:
+ return FF_PROFILE_H264_BASELINE;
+ case VideoStreamType::VideoProfile::kH264Main:
+ return FF_PROFILE_H264_MAIN;
+ case VideoStreamType::VideoProfile::kH264Extended:
+ return FF_PROFILE_H264_EXTENDED;
+ case VideoStreamType::VideoProfile::kH264High:
+ return FF_PROFILE_H264_HIGH;
+ case VideoStreamType::VideoProfile::kH264High10:
+ return FF_PROFILE_H264_HIGH_10;
+ case VideoStreamType::VideoProfile::kH264High422:
+ return FF_PROFILE_H264_HIGH_422;
+ case VideoStreamType::VideoProfile::kH264High444Predictive:
+ return FF_PROFILE_H264_HIGH_444_PREDICTIVE;
+ case VideoStreamType::VideoProfile::kUnknown:
+ case VideoStreamType::VideoProfile::kNotApplicable:
+ case VideoStreamType::VideoProfile::kH264ScalableBaseline:
+ case VideoStreamType::VideoProfile::kH264ScalableHigh:
+ case VideoStreamType::VideoProfile::kH264StereoHigh:
+ case VideoStreamType::VideoProfile::kH264MultiviewHigh:
+ default:
+ return FF_PROFILE_UNKNOWN;
+ }
+}
+
+// Converts an AVPixelFormat to a PixelFormat.
+VideoStreamType::PixelFormat PixelFormatFromAVPixelFormat(
+ AVPixelFormat av_pixel_format) {
+ // TODO(dalesat): Blindly copied from Chromium.
+ switch (av_pixel_format) {
+ case AV_PIX_FMT_YUV422P:
+ case AV_PIX_FMT_YUVJ422P:
+ return VideoStreamType::PixelFormat::kYv16;
+ case AV_PIX_FMT_YUV444P:
+ case AV_PIX_FMT_YUVJ444P:
+ return VideoStreamType::PixelFormat::kYv24;
+ case AV_PIX_FMT_YUV420P:
+ case AV_PIX_FMT_YUVJ420P:
+ return VideoStreamType::PixelFormat::kYv12;
+ case AV_PIX_FMT_YUVA420P:
+ return VideoStreamType::PixelFormat::kYv12A;
+ default:
+ return VideoStreamType::PixelFormat::kUnknown;
+ }
+}
+
+// Converts a PixelFormat to an AVPixelFormat.
+AVPixelFormat AVPixelFormatFromPixelFormat(
+ VideoStreamType::PixelFormat pixel_format) {
+ // TODO(dalesat): Blindly copied from Chromium.
+ switch (pixel_format) {
+ case VideoStreamType::PixelFormat::kYv12:
+ return AV_PIX_FMT_YUV420P;
+ case VideoStreamType::PixelFormat::kYv16:
+ return AV_PIX_FMT_YUV422P;
+ case VideoStreamType::PixelFormat::kYv12A:
+ return AV_PIX_FMT_YUVA420P;
+ case VideoStreamType::PixelFormat::kYv24:
+ return AV_PIX_FMT_YUV444P;
+ case VideoStreamType::PixelFormat::kUnknown:
+ case VideoStreamType::PixelFormat::kI420:
+ case VideoStreamType::PixelFormat::kNv12:
+ case VideoStreamType::PixelFormat::kNv21:
+ case VideoStreamType::PixelFormat::kUyvy:
+ case VideoStreamType::PixelFormat::kYuy2:
+ case VideoStreamType::PixelFormat::kArgb:
+ case VideoStreamType::PixelFormat::kXrgb:
+ case VideoStreamType::PixelFormat::kRgb24:
+ case VideoStreamType::PixelFormat::kRgb32:
+ case VideoStreamType::PixelFormat::kMjpeg:
+ case VideoStreamType::PixelFormat::kMt21:
+ default:
+ return AV_PIX_FMT_NONE;
+ }
+}
+
+// Creates a StreamType from an AVCodecContext describing a video type.
+std::unique_ptr<StreamType> StreamTypeFromVideoCodecContext(
+ const AVCodecContext& from) {
+ VideoStreamType::VideoEncoding encoding;
+ switch (from.codec_id) {
+    case AV_CODEC_ID_THEORA:
+ encoding = VideoStreamType::VideoEncoding::kTheora;
+ break;
+ case CODEC_ID_VP8:
+ encoding = VideoStreamType::VideoEncoding::kVp8;
+ break;
+ default:
+ encoding = VideoStreamType::VideoEncoding::kUnknown;
+ break;
+ }
+
+ return VideoStreamType::Create(
+ encoding,
+ VideoStreamType::VideoProfile::kNotApplicable,
+ PixelFormatFromAVPixelFormat(from.pix_fmt),
+ ColorSpaceFromAVColorSpaceAndRange(from.colorspace, from.color_range),
+ from.width,
+ from.height,
+ from.coded_width,
+ from.coded_height,
+ from.extradata_size == 0 ?
+ nullptr :
+ Bytes::Create(from.extradata, from.extradata_size));
+}
+
+// Creates a StreamType from an AVCodecContext describing a data type.
+std::unique_ptr<StreamType> StreamTypeFromDataCodecContext(
+ const AVCodecContext& from) {
+ // TODO(dalesat): Implement.
+ return StreamType::Create(StreamType::Scheme::kUnknown);
+}
+
+// Creates a StreamType from an AVCodecContext describing a subtitle type.
+std::unique_ptr<StreamType> StreamTypeFromSubtitleCodecContext(
+ const AVCodecContext& from) {
+ // TODO(dalesat): Implement.
+ return StreamType::Create(StreamType::Scheme::kUnknown);
+}
+
+// Creates an AVCodecContext from LpcmStreamType.
+AvCodecContextPtr CodecContextFromLpcmDetails(
+ const LpcmStreamType& stream_type) {
+ AVCodecID codec_id;
+ AVSampleFormat sample_format;
+
+ switch (stream_type.sample_format()) {
+ case LpcmStreamType::SampleFormat::kUnsigned8:
+ codec_id = AV_CODEC_ID_PCM_U8;
+ sample_format = AV_SAMPLE_FMT_U8;
+ break;
+ case LpcmStreamType::SampleFormat::kSigned16:
+ codec_id = AV_CODEC_ID_PCM_S16LE;
+ sample_format = AV_SAMPLE_FMT_S16;
+ break;
+ case LpcmStreamType::SampleFormat::kSigned24In32:
+ codec_id = AV_CODEC_ID_PCM_S24LE;
+ sample_format = AV_SAMPLE_FMT_S32;
+ break;
+ case LpcmStreamType::SampleFormat::kFloat:
+ codec_id = AV_CODEC_ID_PCM_F32LE;
+ sample_format = AV_SAMPLE_FMT_FLT;
+ break;
+ default:
+ return nullptr;
+ }
+
+ AvCodecContextPtr context(avcodec_alloc_context3(nullptr));
+
+ context->codec_type = AVMEDIA_TYPE_AUDIO;
+ context->codec_id = codec_id;
+ context->sample_fmt = sample_format;
+ context->channels = stream_type.channels();
+ context->sample_rate = stream_type.frames_per_second();
+
+ return context;
+}
+
+// Creates an AVCodecContext from CompressedAudioStreamType.
+AvCodecContextPtr AVCodecContextFromCompressedAudioStreamType(
+ const CompressedAudioStreamType& stream_type) {
+ AVCodecID codec_id = AV_CODEC_ID_NONE;
+ AVSampleFormat sample_format;
+
+ switch (stream_type.encoding()) {
+ case CompressedAudioStreamType::AudioEncoding::kVorbis:
+ codec_id = AV_CODEC_ID_VORBIS;
+ sample_format = AV_SAMPLE_FMT_S16;
+ break;
+ default:
+ return nullptr;
+ }
+
+ if (codec_id == AV_CODEC_ID_NONE) {
+ return nullptr;
+ }
+
+ AvCodecContextPtr context(avcodec_alloc_context3(nullptr));
+
+ context->codec_type = AVMEDIA_TYPE_AUDIO;
+ context->codec_id = codec_id;
+ context->sample_fmt = sample_format;
+ context->channels = stream_type.channels();
+ context->sample_rate = stream_type.frames_per_second();
+
+ if (stream_type.encoding_details()) {
+ ExtraDataFromBytes(*stream_type.encoding_details(), context);
+ }
+
+ return context;
+}
+
+// Creates an AVCodecContext from a VideoStreamType.
+AvCodecContextPtr AVCodecContextFromVideoStreamType(
+ const VideoStreamType& stream_type) {
+ AVCodecID codec_id = AV_CODEC_ID_NONE;
+
+ // TODO(dalesat): codec_id
+
+ if (codec_id == AV_CODEC_ID_NONE) {
+ return nullptr;
+ }
+
+ AvCodecContextPtr context(avcodec_alloc_context3(nullptr));
+
+ context->codec_type = AVMEDIA_TYPE_VIDEO;
+ context->codec_id = codec_id;
+ context->profile = FfmpegProfileFromVideoProfile(stream_type.profile());
+ context->pix_fmt = AVPixelFormatFromPixelFormat(stream_type.pixel_format());
+ if (stream_type.color_space() == VideoStreamType::ColorSpace::kJpeg) {
+ context->color_range = AVCOL_RANGE_JPEG;
+ }
+ context->coded_width = stream_type.coded_width();
+ context->coded_height = stream_type.coded_height();
+
+ if (stream_type.encoding_details()) {
+ ExtraDataFromBytes(*stream_type.encoding_details(), context);
+ }
+
+ return context;
+}
+
+} // namespace
+
+std::unique_ptr<StreamType> StreamTypeFromAVCodecContext(
+ const AVCodecContext& from) {
+ switch (from.codec_type) {
+ case AVMEDIA_TYPE_AUDIO:
+ switch (from.codec_id) {
+ case CODEC_ID_PCM_S16BE:
+ case CODEC_ID_PCM_S16LE:
+ case CODEC_ID_PCM_S24BE:
+ case CODEC_ID_PCM_S24LE:
+ case CODEC_ID_PCM_U8:
+ return StreamTypeFromLpcmCodecContext(from);
+ default:
+ if (from.codec == nullptr) {
+ return StreamTypeFromCompressedAudioCodecContext(from);
+ } else {
+ return StreamTypeFromLpcmCodecContext(from);
+ }
+ }
+ case AVMEDIA_TYPE_VIDEO:
+ return StreamTypeFromVideoCodecContext(from);
+ case AVMEDIA_TYPE_UNKNOWN:
+ // Treated as AVMEDIA_TYPE_DATA.
+ case AVMEDIA_TYPE_DATA:
+ return StreamTypeFromDataCodecContext(from);
+ case AVMEDIA_TYPE_SUBTITLE:
+ return StreamTypeFromSubtitleCodecContext(from);
+ case AVMEDIA_TYPE_ATTACHMENT:
+ case AVMEDIA_TYPE_NB:
+ default:
+ return StreamType::Create(StreamType::Scheme::kUnknown);
+ }
+}
+
+AvCodecContextPtr AVCodecContextFromStreamType(const StreamType& stream_type) {
+ switch (stream_type.scheme()) {
+ case StreamType::Scheme::kLpcm:
+ return CodecContextFromLpcmDetails(*stream_type.lpcm());
+ case StreamType::Scheme::kCompressedAudio:
+ return AVCodecContextFromCompressedAudioStreamType(
+ *stream_type.compressed_audio());
+ case StreamType::Scheme::kVideo:
+ return AVCodecContextFromVideoStreamType(*stream_type.video());
+ default:
+ return nullptr;
+ }
+}
+
+} // namespace media
+} // namespace mojo
diff --git a/services/media/framework_ffmpeg/ffmpeg_type_converters.h b/services/media/framework_ffmpeg/ffmpeg_type_converters.h
new file mode 100644
index 0000000..f20d892
--- /dev/null
+++ b/services/media/framework_ffmpeg/ffmpeg_type_converters.h
@@ -0,0 +1,39 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_TYPE_CONVERTERS_H_
+#define SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_TYPE_CONVERTERS_H_
+
+#include "services/media/framework/stream_type.h"
+extern "C" {
+#include "third_party/ffmpeg/libavformat/avformat.h"
+}
+
+namespace mojo {
+namespace media {
+
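+// Deleter for AVCodecContexts allocated with avcodec_alloc_context3.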
+struct AVCodecContextDeleter {
+ void operator()(AVCodecContext* context) const {
+ avcodec_free_context(&context);
+ }
+};
+
+using AvCodecContextPtr =
+ std::unique_ptr<AVCodecContext, AVCodecContextDeleter>;
+
+// We don't specialize TypeConverter here, because specializations have to
+// occur in the same namespace as the original template (mojo::, in this case).
+
+// Creates a StreamType from an AVCodecContext.
+std::unique_ptr<StreamType> StreamTypeFromAVCodecContext(
+ const AVCodecContext& from);
+
+// Creates an AVCodecContext from a StreamType.
+AvCodecContextPtr AVCodecContextFromStreamType(
+ const StreamType& stream_type);
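+//
+// Example (sketch; 'format_context' and the stream index are assumptions):
+//   std::unique_ptr<StreamType> stream_type =
+//       StreamTypeFromAVCodecContext(*format_context->streams[0]->codec);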
+
+} // namespace media
+} // namespace mojo
+
+#endif // SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_TYPE_CONVERTERS_H_
diff --git a/services/media/framework_ffmpeg/ffmpeg_video_decoder.cc b/services/media/framework_ffmpeg/ffmpeg_video_decoder.cc
new file mode 100644
index 0000000..b7a7676
--- /dev/null
+++ b/services/media/framework_ffmpeg/ffmpeg_video_decoder.cc
@@ -0,0 +1,110 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "services/media/framework_ffmpeg/ffmpeg_video_decoder.h"
+
+namespace mojo {
+namespace media {
+
+FfmpegVideoDecoder::FfmpegVideoDecoder(AvCodecContextPtr av_codec_context) :
+ FfmpegDecoderBase(std::move(av_codec_context)) {
+ DCHECK(context());
+}
+
+FfmpegVideoDecoder::~FfmpegVideoDecoder() {}
+
+int FfmpegVideoDecoder::Decode(
+ PayloadAllocator* allocator,
+ bool* frame_decoded_out) {
+ DCHECK(allocator);
+ DCHECK(frame_decoded_out);
+ DCHECK(context());
+ DCHECK(frame());
+
+ int frame_decoded = 0;
+ int input_bytes_used = avcodec_decode_video2(
+ context().get(),
+ frame().get(),
+ &frame_decoded,
+ &packet());
+ *frame_decoded_out = frame_decoded != 0;
+ return input_bytes_used;
+}
+
+PacketPtr FfmpegVideoDecoder::CreateOutputPacket(PayloadAllocator* allocator) {
+ DCHECK(allocator);
+ DCHECK(frame());
+
+ // End of stream is indicated when we're draining and produce no packet.
+ // TODO(dalesat): This is just a copy of the audio version.
+ return Packet::Create(
+ frame()->pts,
+ frame()->pkt_duration,
+ false,
+ packet_size_,
+ frame()->data[0],
+ allocator);
+}
+
+PacketPtr FfmpegVideoDecoder::CreateOutputEndOfStreamPacket() {
+ // TODO(dalesat): Presentation time for this packet.
+ return Packet::CreateEndOfStream(0);
+}
+
+int FfmpegVideoDecoder::AllocateBufferForAvFrame(
+ AVCodecContext* av_codec_context,
+ AVFrame* av_frame,
+ int flags) {
+ // It's important to use av_codec_context here rather than context(),
+ // because av_codec_context is different for different threads when we're
+ // decoding on multiple threads. If this code is moved to an instance method,
+ // be sure to avoid using context().
+
+ // TODO(dalesat): Not sure why/if this is needed.
+ //int result = av_image_check_size(
+ // av_codec_context->width,
+ // av_codec_context->height,
+ // 0,
+ // NULL);
+ //if (result < 0) {
+ // DCHECK(false) << "av_image_check_size failed";
+ // return result;
+ //}
+
+ // TODO(dalesat): Not sure why this is needed.
+ int coded_width =
+ std::max(av_codec_context->width, av_codec_context->coded_width);
+ int coded_height =
+ std::max(av_codec_context->height, av_codec_context->coded_height);
+ DCHECK_EQ(coded_width, av_codec_context->coded_width) <<
+ "coded width is less than width";
+ DCHECK_EQ(coded_height, av_codec_context->coded_height) <<
+ "coded height is less than height";
+
+  // TODO(dalesat): Fill in av_frame->data and av_frame->linesize for each
+  // plane.
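+  // One possible approach (sketch only; assumes libavutil's imgutils helpers
+  // are available in this ffmpeg checkout, and 'payload' stands in for a
+  // buffer obtained from the allocator -- it is not an existing variable):
+  //   int size = av_image_get_buffer_size(
+  //       av_codec_context->pix_fmt, coded_width, coded_height, 1);
+  //   av_image_fill_arrays(
+  //       av_frame->data, av_frame->linesize,
+  //       static_cast<uint8_t*>(payload),
+  //       av_codec_context->pix_fmt, coded_width, coded_height, 1);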
+
+ av_frame->width = coded_width;
+ av_frame->height = coded_height;
+ av_frame->format = av_codec_context->pix_fmt;
+ av_frame->reordered_opaque = av_codec_context->reordered_opaque;
+
+ av_frame->buf[0] = av_buffer_create(
+ av_frame->data[0], // Because this is the first chunk in the buffer.
+ 0, // TODO(dalesat): Provide this.
+ ReleaseBufferForAvFrame,
+ nullptr, // opaque
+ 0); // flags
+
+ return 0;
+}
+
+void FfmpegVideoDecoder::ReleaseBufferForAvFrame(
+ void* opaque, uint8_t* buffer) {
+ // Nothing to do.
+ // TODO(dalesat): Can we get rid of this method altogether?
+}
+
+} // namespace media
+} // namespace mojo
diff --git a/services/media/framework_ffmpeg/ffmpeg_video_decoder.h b/services/media/framework_ffmpeg/ffmpeg_video_decoder.h
new file mode 100644
index 0000000..6e3d059
--- /dev/null
+++ b/services/media/framework_ffmpeg/ffmpeg_video_decoder.h
@@ -0,0 +1,51 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_VIDEO_DECODER_H_
+#define SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_VIDEO_DECODER_H_
+
+#include "services/media/framework_ffmpeg/ffmpeg_decoder_base.h"
+
+namespace mojo {
+namespace media {
+
+// Decoder implementation employing an ffmpeg video decoder.
+// TODO(dalesat): Complete this.
+class FfmpegVideoDecoder : public FfmpegDecoderBase {
+ public:
+ FfmpegVideoDecoder(AvCodecContextPtr av_codec_context);
+
+ ~FfmpegVideoDecoder() override;
+
+ protected:
+ // FfmpegDecoderBase overrides.
+ int Decode(PayloadAllocator* allocator, bool* frame_decoded_out) override;
+
+ PacketPtr CreateOutputPacket(PayloadAllocator* allocator) override;
+
+ PacketPtr CreateOutputEndOfStreamPacket() override;
+
+ private:
+ // Callback used by the ffmpeg decoder to acquire a buffer.
+ static int AllocateBufferForAvFrame(
+ AVCodecContext* av_codec_context,
+ AVFrame* av_frame,
+ int flags);
+
+ // Callback used by the ffmpeg decoder to release a buffer.
+ static void ReleaseBufferForAvFrame(void* opaque, uint8_t* buffer);
+
+  // AllocateBufferForAvFrame deposits the packet size here, because there's
+  // no good evidence of it after avcodec_decode_video2 completes.
+ uint64_t packet_size_;
+
+  // This is used to verify that an allocated buffer is being used as expected
+  // by ffmpeg avcodec_decode_video2. AllocateBufferForAvFrame sets it.
+ //void* packet_buffer_;
+};
+
+} // namespace media
+} // namespace mojo
+
+#endif // SERVICES_MEDIA_FRAMEWORK_FFMPEG_FFMPEG_VIDEO_DECODER_H_