--- a/media/webrtc/trunk/webrtc/base/base.gyp
+++ b/media/webrtc/trunk/webrtc/base/base.gyp
@@ -24,18 +24,23 @@
}],
],
'targets': [
{
# The subset of rtc_base approved for use outside of libjingle.
'target_name': 'rtc_base_approved',
'type': 'static_library',
'sources': [
+ 'bitbuffer.cc',
+ 'bitbuffer.h',
+ 'buffer.cc',
+ 'buffer.h',
'checks.cc',
'checks.h',
+ 'constructormagic.h',
'event.cc',
'event.h',
'event_tracer.cc',
'event_tracer.h',
'exp_filter.cc',
'exp_filter.h',
'md5.cc',
'md5.h',
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/base/bitbuffer.cc
@@ -0,0 +1,296 @@
+/*
+ * Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/base/bitbuffer.h"
+
+#include <algorithm>
+#include <limits>
+
+#include "webrtc/base/checks.h"
+
+namespace {
+
+// Returns the lowest (right-most) |bit_count| bits in |byte|.
+uint8_t LowestBits(uint8_t byte, size_t bit_count) {
+ RTC_DCHECK_LE(bit_count, 8u);
+ return byte & ((1 << bit_count) - 1);
+}
+
+// Returns the highest (left-most) |bit_count| bits in |byte|, shifted to the
+// lowest bits (to the right).
+uint8_t HighestBits(uint8_t byte, size_t bit_count) {
+ RTC_DCHECK_LE(bit_count, 8u);
+ uint8_t shift = 8 - static_cast<uint8_t>(bit_count);
+ uint8_t mask = 0xFF << shift;
+ return (byte & mask) >> shift;
+}
+
+// Returns the highest byte of |val| in a uint8_t.
+uint8_t HighestByte(uint64_t val) {
+ return static_cast<uint8_t>(val >> 56);
+}
+
+// Returns the result of writing partial data from |source|, of
+// |source_bit_count| size in the highest bits, to |target| at
+// |target_bit_offset| from the highest bit.
+uint8_t WritePartialByte(uint8_t source,
+ size_t source_bit_count,
+ uint8_t target,
+ size_t target_bit_offset) {
+ RTC_DCHECK(target_bit_offset < 8);
+ RTC_DCHECK(source_bit_count < 9);
+ RTC_DCHECK(source_bit_count <= (8 - target_bit_offset));
+ // Generate a mask for just the bits we're going to overwrite, so:
+ uint8_t mask =
+ // The number of bits we want, in the most significant bits...
+ static_cast<uint8_t>(0xFF << (8 - source_bit_count))
+ // ...shifted over to the target offset from the most significant bit.
+ >> target_bit_offset;
+
+ // We want the target, with the bits we'll overwrite masked off, or'ed with
+ // the bits from the source we want.
+ return (target & ~mask) | (source >> target_bit_offset);
+}
+
+// Counts the number of bits used in the binary representation of val.
+size_t CountBits(uint64_t val) {
+ size_t bit_count = 0;
+ while (val != 0) {
+ bit_count++;
+ val >>= 1;
+ }
+ return bit_count;
+}
+
+} // namespace
+
+namespace rtc {
+
+BitBuffer::BitBuffer(const uint8_t* bytes, size_t byte_count)
+ : bytes_(bytes), byte_count_(byte_count), byte_offset_(), bit_offset_() {
+ RTC_DCHECK(static_cast<uint64_t>(byte_count_) <=
+ std::numeric_limits<uint32_t>::max());
+}
+
+uint64_t BitBuffer::RemainingBitCount() const {
+ return (static_cast<uint64_t>(byte_count_) - byte_offset_) * 8 - bit_offset_;
+}
+
+bool BitBuffer::ReadUInt8(uint8_t* val) {
+ uint32_t bit_val;
+ if (!ReadBits(&bit_val, sizeof(uint8_t) * 8)) {
+ return false;
+ }
+ RTC_DCHECK(bit_val <= std::numeric_limits<uint8_t>::max());
+ *val = static_cast<uint8_t>(bit_val);
+ return true;
+}
+
+bool BitBuffer::ReadUInt16(uint16_t* val) {
+ uint32_t bit_val;
+ if (!ReadBits(&bit_val, sizeof(uint16_t) * 8)) {
+ return false;
+ }
+ RTC_DCHECK(bit_val <= std::numeric_limits<uint16_t>::max());
+ *val = static_cast<uint16_t>(bit_val);
+ return true;
+}
+
+bool BitBuffer::ReadUInt32(uint32_t* val) {
+ return ReadBits(val, sizeof(uint32_t) * 8);
+}
+
+bool BitBuffer::PeekBits(uint32_t* val, size_t bit_count) {
+ if (!val || bit_count > RemainingBitCount() || bit_count > 32) {
+ return false;
+ }
+ const uint8_t* bytes = bytes_ + byte_offset_;
+ size_t remaining_bits_in_current_byte = 8 - bit_offset_;
+ uint32_t bits = LowestBits(*bytes++, remaining_bits_in_current_byte);
+ // If we're reading fewer bits than what's left in the current byte, just
+ // return the portion of this byte that we need.
+ if (bit_count < remaining_bits_in_current_byte) {
+ *val = HighestBits(bits, bit_offset_ + bit_count);
+ return true;
+ }
+ // Otherwise, subtract what we've read from the bit count and read as many
+ // full bytes as we can into bits.
+ bit_count -= remaining_bits_in_current_byte;
+ while (bit_count >= 8) {
+ bits = (bits << 8) | *bytes++;
+ bit_count -= 8;
+ }
+ // Whatever we have left is smaller than a byte, so grab just the bits we need
+ // and shift them into the lowest bits.
+ if (bit_count > 0) {
+ bits <<= bit_count;
+ bits |= HighestBits(*bytes, bit_count);
+ }
+ *val = bits;
+ return true;
+}
+
+bool BitBuffer::ReadBits(uint32_t* val, size_t bit_count) {
+ return PeekBits(val, bit_count) && ConsumeBits(bit_count);
+}
+
+bool BitBuffer::ConsumeBytes(size_t byte_count) {
+ return ConsumeBits(byte_count * 8);
+}
+
+bool BitBuffer::ConsumeBits(size_t bit_count) {
+ if (bit_count > RemainingBitCount()) {
+ return false;
+ }
+
+ byte_offset_ += (bit_offset_ + bit_count) / 8;
+ bit_offset_ = (bit_offset_ + bit_count) % 8;
+ return true;
+}
+
+bool BitBuffer::ReadExponentialGolomb(uint32_t* val) {
+ if (!val) {
+ return false;
+ }
+ // Store off the current byte/bit offset, in case we want to restore them due
+ // to a failed parse.
+ size_t original_byte_offset = byte_offset_;
+ size_t original_bit_offset = bit_offset_;
+
+ // Count the number of leading 0 bits by peeking/consuming them one at a time.
+ size_t zero_bit_count = 0;
+ uint32_t peeked_bit;
+ while (PeekBits(&peeked_bit, 1) && peeked_bit == 0) {
+ zero_bit_count++;
+ ConsumeBits(1);
+ }
+
+ // We should either be at the end of the stream, or the next bit should be 1.
+ RTC_DCHECK(!PeekBits(&peeked_bit, 1) || peeked_bit == 1);
+
+ // The bit count of the value is the number of zeros + 1. Make sure that many
+ // bits fit in a uint32_t and that we have enough bits left for it, and then
+ // read the value.
+ size_t value_bit_count = zero_bit_count + 1;
+ if (value_bit_count > 32 || !ReadBits(val, value_bit_count)) {
+ RTC_CHECK(Seek(original_byte_offset, original_bit_offset));
+ return false;
+ }
+ *val -= 1;
+ return true;
+}
+
+bool BitBuffer::ReadSignedExponentialGolomb(int32_t* val) {
+ uint32_t unsigned_val;
+ if (!ReadExponentialGolomb(&unsigned_val)) {
+ return false;
+ }
+ if ((unsigned_val & 1) == 0) {
+ *val = -static_cast<int32_t>(unsigned_val / 2);
+ } else {
+ *val = (unsigned_val + 1) / 2;
+ }
+ return true;
+}
+
+void BitBuffer::GetCurrentOffset(
+ size_t* out_byte_offset, size_t* out_bit_offset) {
+ RTC_CHECK(out_byte_offset != NULL);
+ RTC_CHECK(out_bit_offset != NULL);
+ *out_byte_offset = byte_offset_;
+ *out_bit_offset = bit_offset_;
+}
+
+bool BitBuffer::Seek(size_t byte_offset, size_t bit_offset) {
+ if (byte_offset > byte_count_ || bit_offset > 7 ||
+ (byte_offset == byte_count_ && bit_offset > 0)) {
+ return false;
+ }
+ byte_offset_ = byte_offset;
+ bit_offset_ = bit_offset;
+ return true;
+}
+
+BitBufferWriter::BitBufferWriter(uint8_t* bytes, size_t byte_count)
+ : BitBuffer(bytes, byte_count), writable_bytes_(bytes) {
+}
+
+bool BitBufferWriter::WriteUInt8(uint8_t val) {
+ return WriteBits(val, sizeof(uint8_t) * 8);
+}
+
+bool BitBufferWriter::WriteUInt16(uint16_t val) {
+ return WriteBits(val, sizeof(uint16_t) * 8);
+}
+
+bool BitBufferWriter::WriteUInt32(uint32_t val) {
+ return WriteBits(val, sizeof(uint32_t) * 8);
+}
+
+bool BitBufferWriter::WriteBits(uint64_t val, size_t bit_count) {
+ if (bit_count > RemainingBitCount()) {
+ return false;
+ }
+ size_t total_bits = bit_count;
+
+ // For simplicity, push the bits we want to read from val to the highest bits.
+ val <<= (sizeof(uint64_t) * 8 - bit_count);
+
+ uint8_t* bytes = writable_bytes_ + byte_offset_;
+
+ // The first byte is relatively special; the bit offset to write to may put us
+ // in the middle of the byte, and the total bit count to write may require we
+ // save the bits at the end of the byte.
+ size_t remaining_bits_in_current_byte = 8 - bit_offset_;
+ size_t bits_in_first_byte =
+ std::min(bit_count, remaining_bits_in_current_byte);
+ *bytes = WritePartialByte(
+ HighestByte(val), bits_in_first_byte, *bytes, bit_offset_);
+ if (bit_count <= remaining_bits_in_current_byte) {
+ // Nothing left to write, so quit early.
+ return ConsumeBits(total_bits);
+ }
+
+ // Subtract what we've written from the bit count, shift it off the value, and
+ // write the remaining full bytes.
+ val <<= bits_in_first_byte;
+ bytes++;
+ bit_count -= bits_in_first_byte;
+ while (bit_count >= 8) {
+ *bytes++ = HighestByte(val);
+ val <<= 8;
+ bit_count -= 8;
+ }
+
+ // Last byte may also be partial, so write the remaining bits from the top of
+ // val.
+ if (bit_count > 0) {
+ *bytes = WritePartialByte(HighestByte(val), bit_count, *bytes, 0);
+ }
+
+ // All done! Consume the bits we've written.
+ return ConsumeBits(total_bits);
+}
+
+bool BitBufferWriter::WriteExponentialGolomb(uint32_t val) {
+ // We don't support reading UINT32_MAX, because it doesn't fit in a uint32_t
+ // when encoded, so don't support writing it either.
+ if (val == std::numeric_limits<uint32_t>::max()) {
+ return false;
+ }
+ uint64_t val_to_encode = static_cast<uint64_t>(val) + 1;
+
+ // We need to write CountBits(val+1) - 1 0s and then val+1. Since val (as a
+ // uint64_t) has leading zeros, we can just write the total golomb encoded
+ // size worth of bits, knowing the value will appear last.
+ return WriteBits(val_to_encode, CountBits(val_to_encode) * 2 - 1);
+}
+
+} // namespace rtc
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/base/bitbuffer.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_BASE_BITBUFFER_H_
+#define WEBRTC_BASE_BITBUFFER_H_
+
+#include <stdint.h> // For integer types.
+#include <stddef.h> // For size_t.
+
+#include "webrtc/base/constructormagic.h"
+
+namespace rtc {
+
+// A class, similar to ByteBuffer, that can parse bit-sized data out of a set of
+// bytes. Has a similar API to ByteBuffer, plus methods for reading bit-sized
+// and exponential golomb encoded data. For a writable version, use
+// BitBufferWriter. Unlike ByteBuffer, this class doesn't make a copy of the
+// source bytes, so it can be used on read-only data.
+// Sizes/counts specify bits/bytes, for clarity.
+// Byte order is assumed big-endian/network.
+class BitBuffer {
+ public:
+ BitBuffer(const uint8_t* bytes, size_t byte_count);
+
+ // Gets the current offset, in bytes/bits, from the start of the buffer. The
+ // bit offset is the offset into the current byte, in the range [0,7].
+ void GetCurrentOffset(size_t* out_byte_offset, size_t* out_bit_offset);
+
+ // The remaining bits in the byte buffer.
+ uint64_t RemainingBitCount() const;
+
+ // Reads byte-sized values from the buffer. Returns false if there isn't
+ // enough data left for the specified type.
+ bool ReadUInt8(uint8_t* val);
+ bool ReadUInt16(uint16_t* val);
+ bool ReadUInt32(uint32_t* val);
+
+ // Reads bit-sized values from the buffer. Returns false if there isn't enough
+ // data left for the specified bit count.
+ bool ReadBits(uint32_t* val, size_t bit_count);
+
+ // Peeks bit-sized values from the buffer. Returns false if there isn't enough
+ // data left for the specified number of bits. Doesn't move the current
+ // offset.
+ bool PeekBits(uint32_t* val, size_t bit_count);
+
+ // Reads the exponential golomb encoded value at the current offset.
+ // Exponential golomb values are encoded as:
+ // 1) x = source val + 1
+ // 2) In binary, write [countbits(x) - 1] 0s, then x
+ // To decode, we count the number of leading 0 bits, read that many + 1 bits,
+ // and increment the result by 1.
+ // Returns false if there isn't enough data left for the specified type, or if
+ // the value wouldn't fit in a uint32_t.
+ bool ReadExponentialGolomb(uint32_t* val);
+ // Reads signed exponential golomb values at the current offset. Signed
+ // exponential golomb values are just the unsigned values mapped to the
+ // sequence 0, 1, -1, 2, -2, etc. in order.
+ bool ReadSignedExponentialGolomb(int32_t* val);
+
+ // Moves current position |byte_count| bytes forward. Returns false if
+ // there aren't enough bytes left in the buffer.
+ bool ConsumeBytes(size_t byte_count);
+ // Moves current position |bit_count| bits forward. Returns false if
+ // there aren't enough bits left in the buffer.
+ bool ConsumeBits(size_t bit_count);
+
+ // Sets the current offset to the provided byte/bit offsets. The bit
+ // offset is from the given byte, in the range [0,7].
+ bool Seek(size_t byte_offset, size_t bit_offset);
+
+ protected:
+ const uint8_t* const bytes_;
+ // The total size of |bytes_|.
+ size_t byte_count_;
+ // The current offset, in bytes, from the start of |bytes_|.
+ size_t byte_offset_;
+ // The current offset, in bits, into the current byte.
+ size_t bit_offset_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(BitBuffer);
+};
+
+// A BitBuffer API for write operations. Supports symmetric write APIs to the
+// reading APIs of BitBuffer. Note that the read/write offset is shared with the
+// BitBuffer API, so both reading and writing will consume bytes/bits.
+class BitBufferWriter : public BitBuffer {
+ public:
+ // Constructs a bit buffer for the writable buffer of |bytes|.
+ BitBufferWriter(uint8_t* bytes, size_t byte_count);
+
+ // Writes byte-sized values from the buffer. Returns false if there isn't
+ // enough data left for the specified type.
+ bool WriteUInt8(uint8_t val);
+ bool WriteUInt16(uint16_t val);
+ bool WriteUInt32(uint32_t val);
+
+ // Writes bit-sized values to the buffer. Returns false if there isn't enough
+ // room left for the specified number of bits.
+ bool WriteBits(uint64_t val, size_t bit_count);
+
+ // Writes the exponential golomb encoded version of the supplied value.
+ // Returns false if there isn't enough room left for the value.
+ bool WriteExponentialGolomb(uint32_t val);
+
+ private:
+ // The buffer, as a writable array.
+ uint8_t* const writable_bytes_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(BitBufferWriter);
+};
+
+} // namespace rtc
+
+#endif // WEBRTC_BASE_BITBUFFER_H_
new file mode 100644
--- /dev/null
+++ b/media/webrtc/trunk/webrtc/base/bitbuffer_unittest.cc
@@ -0,0 +1,330 @@
+/*
+ * Copyright 2015 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/base/arraysize.h"
+#include "webrtc/base/bitbuffer.h"
+#include "webrtc/base/bytebuffer.h"
+#include "webrtc/base/common.h"
+#include "webrtc/base/gunit.h"
+
+namespace rtc {
+
+TEST(BitBufferTest, ConsumeBits) {
+ const uint8_t bytes[64] = {0};
+ BitBuffer buffer(bytes, 32);
+ uint64_t total_bits = 32 * 8;
+ EXPECT_EQ(total_bits, buffer.RemainingBitCount());
+ EXPECT_TRUE(buffer.ConsumeBits(3));
+ total_bits -= 3;
+ EXPECT_EQ(total_bits, buffer.RemainingBitCount());
+ EXPECT_TRUE(buffer.ConsumeBits(3));
+ total_bits -= 3;
+ EXPECT_EQ(total_bits, buffer.RemainingBitCount());
+ EXPECT_TRUE(buffer.ConsumeBits(15));
+ total_bits -= 15;
+ EXPECT_EQ(total_bits, buffer.RemainingBitCount());
+ EXPECT_TRUE(buffer.ConsumeBits(37));
+ total_bits -= 37;
+ EXPECT_EQ(total_bits, buffer.RemainingBitCount());
+
+ EXPECT_FALSE(buffer.ConsumeBits(32 * 8));
+ EXPECT_EQ(total_bits, buffer.RemainingBitCount());
+}
+
+TEST(BitBufferTest, ReadBytesAligned) {
+ const uint8_t bytes[] = {0x0A, 0xBC, 0xDE, 0xF1, 0x23, 0x45, 0x67, 0x89};
+ uint8_t val8;
+ uint16_t val16;
+ uint32_t val32;
+ BitBuffer buffer(bytes, 8);
+ EXPECT_TRUE(buffer.ReadUInt8(&val8));
+ EXPECT_EQ(0x0Au, val8);
+ EXPECT_TRUE(buffer.ReadUInt8(&val8));
+ EXPECT_EQ(0xBCu, val8);
+ EXPECT_TRUE(buffer.ReadUInt16(&val16));
+ EXPECT_EQ(0xDEF1u, val16);
+ EXPECT_TRUE(buffer.ReadUInt32(&val32));
+ EXPECT_EQ(0x23456789u, val32);
+}
+
+TEST(BitBufferTest, ReadBytesOffset4) {
+ const uint8_t bytes[] = {0x0A, 0xBC, 0xDE, 0xF1, 0x23,
+ 0x45, 0x67, 0x89, 0x0A};
+ uint8_t val8;
+ uint16_t val16;
+ uint32_t val32;
+ BitBuffer buffer(bytes, 9);
+ EXPECT_TRUE(buffer.ConsumeBits(4));
+
+ EXPECT_TRUE(buffer.ReadUInt8(&val8));
+ EXPECT_EQ(0xABu, val8);
+ EXPECT_TRUE(buffer.ReadUInt8(&val8));
+ EXPECT_EQ(0xCDu, val8);
+ EXPECT_TRUE(buffer.ReadUInt16(&val16));
+ EXPECT_EQ(0xEF12u, val16);
+ EXPECT_TRUE(buffer.ReadUInt32(&val32));
+ EXPECT_EQ(0x34567890u, val32);
+}
+
+TEST(BitBufferTest, ReadBytesOffset3) {
+ // The pattern we'll check against is counting down from 0b1111. It looks
+ // weird here because it's all offset by 3.
+ // Byte pattern is:
+ // 56701234
+ // 0b00011111,
+ // 0b11011011,
+ // 0b10010111,
+ // 0b01010011,
+ // 0b00001110,
+ // 0b11001010,
+ // 0b10000110,
+ // 0b01000010
+ // xxxxx <-- last 5 bits unused.
+
+ // The bytes. It almost looks like counting down by two at a time, except the
+ // jump at 5->3->0, since that's when the high bit is turned off.
+ const uint8_t bytes[] = {0x1F, 0xDB, 0x97, 0x53, 0x0E, 0xCA, 0x86, 0x42};
+
+ uint8_t val8;
+ uint16_t val16;
+ uint32_t val32;
+ BitBuffer buffer(bytes, 8);
+ EXPECT_TRUE(buffer.ConsumeBits(3));
+ EXPECT_TRUE(buffer.ReadUInt8(&val8));
+ EXPECT_EQ(0xFEu, val8);
+ EXPECT_TRUE(buffer.ReadUInt16(&val16));
+ EXPECT_EQ(0xDCBAu, val16);
+ EXPECT_TRUE(buffer.ReadUInt32(&val32));
+ EXPECT_EQ(0x98765432u, val32);
+ // 5 bits left unread. Not enough to read a uint8_t.
+ EXPECT_EQ(5u, buffer.RemainingBitCount());
+ EXPECT_FALSE(buffer.ReadUInt8(&val8));
+}
+
+TEST(BitBufferTest, ReadBits) {
+ // Bit values are:
+ // 0b01001101,
+ // 0b00110010
+ const uint8_t bytes[] = {0x4D, 0x32};
+ uint32_t val;
+ BitBuffer buffer(bytes, 2);
+ EXPECT_TRUE(buffer.ReadBits(&val, 3));
+ // 0b010
+ EXPECT_EQ(0x2u, val);
+ EXPECT_TRUE(buffer.ReadBits(&val, 2));
+ // 0b01
+ EXPECT_EQ(0x1u, val);
+ EXPECT_TRUE(buffer.ReadBits(&val, 7));
+ // 0b1010011
+ EXPECT_EQ(0x53u, val);
+ EXPECT_TRUE(buffer.ReadBits(&val, 2));
+ // 0b00
+ EXPECT_EQ(0x0u, val);
+ EXPECT_TRUE(buffer.ReadBits(&val, 1));
+ // 0b1
+ EXPECT_EQ(0x1u, val);
+ EXPECT_TRUE(buffer.ReadBits(&val, 1));
+ // 0b0
+ EXPECT_EQ(0x0u, val);
+
+ EXPECT_FALSE(buffer.ReadBits(&val, 1));
+}
+
+TEST(BitBufferTest, SetOffsetValues) {
+ uint8_t bytes[4] = {0};
+ BitBufferWriter buffer(bytes, 4);
+
+ size_t byte_offset, bit_offset;
+ // Bit offsets are [0,7].
+ EXPECT_TRUE(buffer.Seek(0, 0));
+ EXPECT_TRUE(buffer.Seek(0, 7));
+ buffer.GetCurrentOffset(&byte_offset, &bit_offset);
+ EXPECT_EQ(0u, byte_offset);
+ EXPECT_EQ(7u, bit_offset);
+ EXPECT_FALSE(buffer.Seek(0, 8));
+ buffer.GetCurrentOffset(&byte_offset, &bit_offset);
+ EXPECT_EQ(0u, byte_offset);
+ EXPECT_EQ(7u, bit_offset);
+ // Byte offsets are [0,length]. At byte offset length, the bit offset must be
+ // 0.
+ EXPECT_TRUE(buffer.Seek(0, 0));
+ EXPECT_TRUE(buffer.Seek(2, 4));
+ buffer.GetCurrentOffset(&byte_offset, &bit_offset);
+ EXPECT_EQ(2u, byte_offset);
+ EXPECT_EQ(4u, bit_offset);
+ EXPECT_TRUE(buffer.Seek(4, 0));
+ EXPECT_FALSE(buffer.Seek(5, 0));
+ buffer.GetCurrentOffset(&byte_offset, &bit_offset);
+ EXPECT_EQ(4u, byte_offset);
+ EXPECT_EQ(0u, bit_offset);
+ EXPECT_FALSE(buffer.Seek(4, 1));
+
+ // Disable death test on Android because it relies on fork() and doesn't play
+ // nicely.
+#if defined(GTEST_HAS_DEATH_TEST)
+#if !defined(WEBRTC_ANDROID)
+ // Passing a NULL out parameter is death.
+ EXPECT_DEATH(buffer.GetCurrentOffset(&byte_offset, NULL), "");
+#endif
+#endif
+}
+
+uint64_t GolombEncoded(uint32_t val) {
+ val++;
+ uint32_t bit_counter = val;
+ uint64_t bit_count = 0;
+ while (bit_counter > 0) {
+ bit_count++;
+ bit_counter >>= 1;
+ }
+ return static_cast<uint64_t>(val) << (64 - (bit_count * 2 - 1));
+}
+
+TEST(BitBufferTest, GolombUint32Values) {
+ ByteBuffer byteBuffer;
+ byteBuffer.Resize(16);
+ BitBuffer buffer(reinterpret_cast<const uint8_t*>(byteBuffer.Data()),
+ byteBuffer.Capacity());
+ // Test over the uint32_t range with a large enough step that the test doesn't
+ // take forever. Around 20,000 iterations should do.
+ const int kStep = std::numeric_limits<uint32_t>::max() / 20000;
+ for (uint32_t i = 0; i < std::numeric_limits<uint32_t>::max() - kStep;
+ i += kStep) {
+ uint64_t encoded_val = GolombEncoded(i);
+ byteBuffer.Clear();
+ byteBuffer.WriteUInt64(encoded_val);
+ uint32_t decoded_val;
+ EXPECT_TRUE(buffer.Seek(0, 0));
+ EXPECT_TRUE(buffer.ReadExponentialGolomb(&decoded_val));
+ EXPECT_EQ(i, decoded_val);
+ }
+}
+
+TEST(BitBufferTest, SignedGolombValues) {
+ uint8_t golomb_bits[] = {
+ 0x80, // 1
+ 0x40, // 010
+ 0x60, // 011
+ 0x20, // 00100
+ 0x38, // 00111
+ };
+ int32_t expected[] = {0, 1, -1, 2, -3};
+ for (size_t i = 0; i < sizeof(golomb_bits); ++i) {
+ BitBuffer buffer(&golomb_bits[i], 1);
+ int32_t decoded_val;
+ ASSERT_TRUE(buffer.ReadSignedExponentialGolomb(&decoded_val));
+ EXPECT_EQ(expected[i], decoded_val)
+ << "Mismatch in expected/decoded value for golomb_bits[" << i
+ << "]: " << static_cast<int>(golomb_bits[i]);
+ }
+}
+
+TEST(BitBufferTest, NoGolombOverread) {
+ const uint8_t bytes[] = {0x00, 0xFF, 0xFF};
+ // Make sure the bit buffer correctly enforces byte length on golomb reads.
+ // If it didn't, the above buffer would be valid at 3 bytes.
+ BitBuffer buffer(bytes, 1);
+ uint32_t decoded_val;
+ EXPECT_FALSE(buffer.ReadExponentialGolomb(&decoded_val));
+
+ BitBuffer longer_buffer(bytes, 2);
+ EXPECT_FALSE(longer_buffer.ReadExponentialGolomb(&decoded_val));
+
+ BitBuffer longest_buffer(bytes, 3);
+ EXPECT_TRUE(longest_buffer.ReadExponentialGolomb(&decoded_val));
+ // Golomb should have read 9 bits, so 0x01FF, and since it is golomb, the
+ // result is 0x01FF - 1 = 0x01FE.
+ EXPECT_EQ(0x01FEu, decoded_val);
+}
+
+TEST(BitBufferWriterTest, SymmetricReadWrite) {
+ uint8_t bytes[16] = {0};
+ BitBufferWriter buffer(bytes, 4);
+
+ // Write some bit data at various sizes.
+ EXPECT_TRUE(buffer.WriteBits(0x2u, 3));
+ EXPECT_TRUE(buffer.WriteBits(0x1u, 2));
+ EXPECT_TRUE(buffer.WriteBits(0x53u, 7));
+ EXPECT_TRUE(buffer.WriteBits(0x0u, 2));
+ EXPECT_TRUE(buffer.WriteBits(0x1u, 1));
+ EXPECT_TRUE(buffer.WriteBits(0x1ABCDu, 17));
+ // That should be all that fits in the buffer.
+ EXPECT_FALSE(buffer.WriteBits(1, 1));
+
+ EXPECT_TRUE(buffer.Seek(0, 0));
+ uint32_t val;
+ EXPECT_TRUE(buffer.ReadBits(&val, 3));
+ EXPECT_EQ(0x2u, val);
+ EXPECT_TRUE(buffer.ReadBits(&val, 2));
+ EXPECT_EQ(0x1u, val);
+ EXPECT_TRUE(buffer.ReadBits(&val, 7));
+ EXPECT_EQ(0x53u, val);
+ EXPECT_TRUE(buffer.ReadBits(&val, 2));
+ EXPECT_EQ(0x0u, val);
+ EXPECT_TRUE(buffer.ReadBits(&val, 1));
+ EXPECT_EQ(0x1u, val);
+ EXPECT_TRUE(buffer.ReadBits(&val, 17));
+ EXPECT_EQ(0x1ABCDu, val);
+ // And there should be nothing left.
+ EXPECT_FALSE(buffer.ReadBits(&val, 1));
+}
+
+TEST(BitBufferWriterTest, SymmetricBytesMisaligned) {
+ uint8_t bytes[16] = {0};
+ BitBufferWriter buffer(bytes, 16);
+
+ // Offset 3, to get things misaligned.
+ EXPECT_TRUE(buffer.ConsumeBits(3));
+ EXPECT_TRUE(buffer.WriteUInt8(0x12u));
+ EXPECT_TRUE(buffer.WriteUInt16(0x3456u));
+ EXPECT_TRUE(buffer.WriteUInt32(0x789ABCDEu));
+
+ buffer.Seek(0, 3);
+ uint8_t val8;
+ uint16_t val16;
+ uint32_t val32;
+ EXPECT_TRUE(buffer.ReadUInt8(&val8));
+ EXPECT_EQ(0x12u, val8);
+ EXPECT_TRUE(buffer.ReadUInt16(&val16));
+ EXPECT_EQ(0x3456u, val16);
+ EXPECT_TRUE(buffer.ReadUInt32(&val32));
+ EXPECT_EQ(0x789ABCDEu, val32);
+}
+
+TEST(BitBufferWriterTest, SymmetricGolomb) {
+ char test_string[] = "my precious";
+ uint8_t bytes[64] = {0};
+ BitBufferWriter buffer(bytes, 64);
+ for (size_t i = 0; i < arraysize(test_string); ++i) {
+ EXPECT_TRUE(buffer.WriteExponentialGolomb(test_string[i]));
+ }
+ buffer.Seek(0, 0);
+ for (size_t i = 0; i < arraysize(test_string); ++i) {
+ uint32_t val;
+ EXPECT_TRUE(buffer.ReadExponentialGolomb(&val));
+ EXPECT_LE(val, std::numeric_limits<uint8_t>::max());
+ EXPECT_EQ(test_string[i], static_cast<char>(val));
+ }
+}
+
+TEST(BitBufferWriterTest, WriteClearsBits) {
+ uint8_t bytes[] = {0xFF, 0xFF};
+ BitBufferWriter buffer(bytes, 2);
+ EXPECT_TRUE(buffer.ConsumeBits(3));
+ EXPECT_TRUE(buffer.WriteBits(0, 1));
+ EXPECT_EQ(0xEFu, bytes[0]);
+ EXPECT_TRUE(buffer.WriteBits(0, 3));
+ EXPECT_EQ(0xE1u, bytes[0]);
+ EXPECT_TRUE(buffer.WriteBits(0, 2));
+ EXPECT_EQ(0xE0u, bytes[0]);
+ EXPECT_EQ(0x7F, bytes[1]);
+}
+
+} // namespace rtc
--- a/media/webrtc/trunk/webrtc/base/buffer.h
+++ b/media/webrtc/trunk/webrtc/base/buffer.h
@@ -8,17 +8,18 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_BASE_BUFFER_H_
#define WEBRTC_BASE_BUFFER_H_
#include <string.h>
-#include "webrtc/base/common.h"
+// common.h isn't in the rtc_approved list
+//#include "webrtc/base/common.h"
#include "webrtc/base/scoped_ptr.h"
namespace rtc {
// Basic buffer class, can be grown and shrunk dynamically.
// Unlike std::string/vector, does not initialize data when expanding capacity.
class Buffer {
public:
@@ -47,22 +48,22 @@ class Buffer {
bool operator==(const Buffer& buf) const {
return (size_ == buf.size() && memcmp(data_.get(), buf.data(), size_) == 0);
}
bool operator!=(const Buffer& buf) const {
return !operator==(buf);
}
void SetData(const void* data, size_t size) {
- ASSERT(data != NULL || size == 0);
+ assert(data != NULL || size == 0);
SetSize(size);
memcpy(data_.get(), data, size);
}
void AppendData(const void* data, size_t size) {
- ASSERT(data != NULL || size == 0);
+ assert(data != NULL || size == 0);
size_t old_size = size_;
SetSize(size_ + size);
memcpy(data_.get() + old_size, data, size);
}
void SetSize(size_t size) {
SetCapacity(size);
size_ = size;
}
@@ -71,17 +72,17 @@ class Buffer {
rtc::scoped_ptr<char[]> data(new char[capacity]);
memcpy(data.get(), data_.get(), size_);
data_.swap(data);
capacity_ = capacity;
}
}
void TransferTo(Buffer* buf) {
- ASSERT(buf != NULL);
+ assert(buf != NULL);
buf->data_.reset(data_.release());
buf->size_ = size_;
buf->capacity_ = capacity_;
Construct(NULL, 0, 0);
}
protected:
void Construct(const void* data, size_t size, size_t capacity) {
--- a/media/webrtc/trunk/webrtc/base/checks.h
+++ b/media/webrtc/trunk/webrtc/base/checks.h
@@ -86,16 +86,18 @@ namespace rtc {
// compilation mode.
//
// We make sure CHECK et al. always evaluates their arguments, as
// doing CHECK(FunctionWithSideEffect()) is a common idiom.
#define CHECK(condition) \
LAZY_STREAM(rtc::FatalMessage(__FILE__, __LINE__).stream(), !(condition)) \
<< "Check failed: " #condition << std::endl << "# "
+#define RTC_CHECK(condition) CHECK(condition)
+
// Helper macro for binary operators.
// Don't use this macro directly in your code, use CHECK_EQ et al below.
//
// TODO(akalin): Rewrite this so that constructs like if (...)
// CHECK_EQ(...) else { ... } work properly.
#define CHECK_OP(name, op, val1, val2) \
if (std::string* _result = \
rtc::Check##name##Impl((val1), (val2), \
@@ -180,16 +182,46 @@ DEFINE_CHECK_OP_IMPL(GT, > )
#define DCHECK_EQ(v1, v2) EAT_STREAM_PARAMETERS((v1) == (v2))
#define DCHECK_NE(v1, v2) EAT_STREAM_PARAMETERS((v1) != (v2))
#define DCHECK_LE(v1, v2) EAT_STREAM_PARAMETERS((v1) <= (v2))
#define DCHECK_LT(v1, v2) EAT_STREAM_PARAMETERS((v1) < (v2))
#define DCHECK_GE(v1, v2) EAT_STREAM_PARAMETERS((v1) >= (v2))
#define DCHECK_GT(v1, v2) EAT_STREAM_PARAMETERS((v1) > (v2))
#endif
+#define RTC_CHECK_EQ(val1, val2) CHECK_OP(EQ, ==, val1, val2)
+#define RTC_CHECK_NE(val1, val2) CHECK_OP(NE, !=, val1, val2)
+#define RTC_CHECK_LE(val1, val2) CHECK_OP(LE, <=, val1, val2)
+#define RTC_CHECK_LT(val1, val2) CHECK_OP(LT, < , val1, val2)
+#define RTC_CHECK_GE(val1, val2) CHECK_OP(GE, >=, val1, val2)
+#define RTC_CHECK_GT(val1, val2) CHECK_OP(GT, > , val1, val2)
+
+// The RTC_DCHECK macro is equivalent to RTC_CHECK except that it only generates
+// code in debug builds. It does reference the condition parameter in all cases,
+// though, so callers won't risk getting warnings about unused variables.
+#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
+#define RTC_DCHECK_IS_ON 1
+#define RTC_DCHECK(condition) CHECK(condition)
+#define RTC_DCHECK_EQ(v1, v2) CHECK_EQ(v1, v2)
+#define RTC_DCHECK_NE(v1, v2) CHECK_NE(v1, v2)
+#define RTC_DCHECK_LE(v1, v2) CHECK_LE(v1, v2)
+#define RTC_DCHECK_LT(v1, v2) CHECK_LT(v1, v2)
+#define RTC_DCHECK_GE(v1, v2) CHECK_GE(v1, v2)
+#define RTC_DCHECK_GT(v1, v2) CHECK_GT(v1, v2)
+#else
+#define RTC_DCHECK_IS_ON 0
+#define RTC_DCHECK(condition) EAT_STREAM_PARAMETERS(condition)
+#define RTC_DCHECK_EQ(v1, v2) EAT_STREAM_PARAMETERS((v1) == (v2))
+#define RTC_DCHECK_NE(v1, v2) EAT_STREAM_PARAMETERS((v1) != (v2))
+#define RTC_DCHECK_LE(v1, v2) EAT_STREAM_PARAMETERS((v1) <= (v2))
+#define RTC_DCHECK_LT(v1, v2) EAT_STREAM_PARAMETERS((v1) < (v2))
+#define RTC_DCHECK_GE(v1, v2) EAT_STREAM_PARAMETERS((v1) >= (v2))
+#define RTC_DCHECK_GT(v1, v2) EAT_STREAM_PARAMETERS((v1) > (v2))
+#endif
+
// This is identical to LogMessageVoidify but in name.
class FatalMessageVoidify {
public:
FatalMessageVoidify() { }
// This has to be an operator with a precedence lower than << but
// higher than ?:
void operator&(std::ostream&) { }
};
--- a/media/webrtc/trunk/webrtc/base/constructormagic.h
+++ b/media/webrtc/trunk/webrtc/base/constructormagic.h
@@ -12,34 +12,42 @@
#define WEBRTC_BASE_CONSTRUCTORMAGIC_H_
// Undefine macros first, just in case. Some third-party includes have their own
// version.
#undef DISALLOW_ASSIGN
#define DISALLOW_ASSIGN(TypeName) \
void operator=(const TypeName&)
+#define RTC_DISALLOW_ASSIGN(TypeName) \
+ void operator=(const TypeName&) = delete
// A macro to disallow the evil copy constructor and operator= functions
// This should be used in the private: declarations for a class.
#undef DISALLOW_COPY_AND_ASSIGN
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&); \
DISALLOW_ASSIGN(TypeName)
+#define RTC_DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&) = delete; \
+ RTC_DISALLOW_ASSIGN(TypeName)
// Alternative, less-accurate legacy name.
#undef DISALLOW_EVIL_CONSTRUCTORS
#define DISALLOW_EVIL_CONSTRUCTORS(TypeName) \
DISALLOW_COPY_AND_ASSIGN(TypeName)
// A macro to disallow all the implicit constructors, namely the
// default constructor, copy constructor and operator= functions.
//
// This should be used in the private: declarations for a class
// that wants to prevent anyone from instantiating it. This is
// especially useful for classes containing only static methods.
#undef DISALLOW_IMPLICIT_CONSTRUCTORS
#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
TypeName(); \
DISALLOW_EVIL_CONSTRUCTORS(TypeName)
+#define RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
+ TypeName() = delete; \
+ RTC_DISALLOW_COPY_AND_ASSIGN(TypeName)
#endif // WEBRTC_BASE_CONSTRUCTORMAGIC_H_
--- a/media/webrtc/trunk/webrtc/common_types.h
+++ b/media/webrtc/trunk/webrtc/common_types.h
@@ -634,16 +634,19 @@ struct VideoCodecVP8 {
struct VideoCodecVP9 {
VideoCodecComplexity complexity;
int resilience;
unsigned char numberOfTemporalLayers;
bool denoisingOn;
bool frameDroppingOn;
int keyFrameInterval;
bool adaptiveQpMode;
+ bool automaticResizeOn;
+ unsigned char numberOfSpatialLayers;
+ bool flexibleMode;
};
// H264 specific.
struct VideoCodecH264 {
VideoCodecProfile profile;
uint8_t profile_byte;
uint8_t constraints;
uint8_t level;
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/rtp_rtcp.gypi
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/rtp_rtcp.gypi
@@ -87,16 +87,18 @@
'source/rtp_sender_video.h',
'source/video_codec_information.h',
'source/rtp_format.cc',
'source/rtp_format.h',
'source/rtp_format_h264.cc',
'source/rtp_format_h264.h',
'source/rtp_format_vp8.cc',
'source/rtp_format_vp8.h',
+ 'source/rtp_format_vp9.cc',
+ 'source/rtp_format_vp9.h',
'source/rtp_format_video_generic.cc',
'source/rtp_format_video_generic.h',
'source/vp8_partition_aggregator.cc',
'source/vp8_partition_aggregator.h',
# Mocks
'mocks/mock_rtp_rtcp.h',
'source/mock/mock_rtp_payload_strategy.h',
], # source
--- a/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h
+++ b/media/webrtc/trunk/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h
@@ -20,17 +20,17 @@
#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP9_H_
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP9_H_
#include <queue>
#include <string>
#include "webrtc/base/constructormagic.h"
-#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format.h"
#include "webrtc/typedefs.h"
namespace webrtc {
class RtpPacketizerVp9 : public RtpPacketizer {
public:
RtpPacketizerVp9(const RTPVideoHeaderVP9& hdr, size_t max_payload_length);
@@ -86,17 +86,17 @@ class RtpPacketizerVp9 : public RtpPacke
size_t* header_length) const;
const RTPVideoHeaderVP9 hdr_;
const size_t max_payload_length_; // The max length in bytes of one packet.
const uint8_t* payload_; // The payload data to be packetized.
size_t payload_size_; // The size in bytes of the payload data.
PacketInfoQueue packets_;
- RTC_DISALLOW_COPY_AND_ASSIGN(RtpPacketizerVp9);
+ DISALLOW_COPY_AND_ASSIGN(RtpPacketizerVp9);
};
class RtpDepacketizerVp9 : public RtpDepacketizer {
public:
virtual ~RtpDepacketizerVp9() {}
bool Parse(ParsedPayload* parsed_payload,
--- a/media/webrtc/trunk/webrtc/modules/video_capture/linux/device_info_linux.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_capture/linux/device_info_linux.cc
@@ -159,17 +159,17 @@ int32_t DeviceInfoLinux::GetDeviceName(
{
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
"buffer passed is too small");
return -1;
}
} else {
// if there's no bus info to use for uniqueId, invent one - and it has to be repeatable
if (snprintf(deviceUniqueIdUTF8, deviceUniqueIdUTF8Length, "fake_%u", device_index) >=
- deviceUniqueIdUTF8Length)
+ (int) deviceUniqueIdUTF8Length)
{
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
"buffer passed is too small");
return -1;
}
}
return 0;
--- a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h
@@ -38,21 +38,21 @@ struct CodecSpecificInfoVP8 {
uint8_t simulcastIdx;
uint8_t temporalIdx;
bool layerSync;
int tl0PicIdx; // Negative value to skip tl0PicIdx.
int8_t keyIdx; // Negative value to skip keyIdx.
};
struct CodecSpecificInfoVP9 {
- bool hasReceivedSLI;
- uint8_t pictureIdSLI;
- bool hasReceivedRPSI;
- uint64_t pictureIdRPSI;
- int16_t pictureId; // Negative value to skip pictureId.
+ bool has_received_sli;
+ uint8_t picture_id_sli;
+ bool has_received_rpsi;
+ uint64_t picture_id_rpsi;
+ int16_t picture_id; // Negative value to skip pictureId.
bool inter_pic_predicted; // This layer frame is dependent on previously
// coded frame(s).
bool flexible_mode;
bool ss_data_available;
int tl0_pic_idx; // Negative value to skip tl0PicIdx.
uint8_t temporal_idx;
--- a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/screenshare_layers.cc
@@ -13,39 +13,39 @@
namespace webrtc {
ScreenshareLayersVP9::ScreenshareLayersVP9(uint8_t num_layers)
: num_layers_(num_layers),
start_layer_(0),
last_timestamp_(0),
timestamp_initialized_(false) {
- RTC_DCHECK_GT(num_layers, 0);
- RTC_DCHECK_LE(num_layers, kMaxVp9NumberOfSpatialLayers);
+ DCHECK_GT(num_layers, 0);
+ DCHECK_LE(num_layers, kMaxVp9NumberOfSpatialLayers);
memset(bits_used_, 0, sizeof(bits_used_));
memset(threshold_kbps_, 0, sizeof(threshold_kbps_));
}
uint8_t ScreenshareLayersVP9::GetStartLayer() const {
return start_layer_;
}
void ScreenshareLayersVP9::ConfigureBitrate(int threshold_kbps,
uint8_t layer_id) {
// The upper layer is always the layer we spill frames
// to when the bitrate becomes to high, therefore setting
// a max limit is not allowed. The top layer bitrate is
// never used either so configuring it makes no difference.
- RTC_DCHECK_LT(layer_id, num_layers_ - 1);
+ DCHECK_LT(layer_id, num_layers_ - 1);
threshold_kbps_[layer_id] = threshold_kbps;
}
void ScreenshareLayersVP9::LayerFrameEncoded(unsigned int size_bytes,
uint8_t layer_id) {
- RTC_DCHECK_LT(layer_id, num_layers_);
+ DCHECK_LT(layer_id, num_layers_);
bits_used_[layer_id] += size_bytes * 8;
}
VP9EncoderImpl::SuperFrameRefSettings
ScreenshareLayersVP9::GetSuperFrameSettings(uint32_t timestamp,
bool is_keyframe) {
VP9EncoderImpl::SuperFrameRefSettings settings;
if (!timestamp_initialized_) {
--- a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9.gyp
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9.gyp
@@ -19,17 +19,21 @@
'<(webrtc_root)/modules/video_coding/utility/video_coding_utility.gyp:video_coding_utility',
'<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
],
'conditions': [
['build_libvpx==1', {
'dependencies': [
'<(libvpx_dir)/libvpx.gyp:libvpx_new',
],
- }],
+ }, {
+ 'include_dirs': [
+ '../../../../../../../libvpx',
+ ],
+ }],
['build_vp9==1', {
'sources': [
'include/vp9.h',
'screenshare_layers.cc',
'screenshare_layers.h',
'vp9_frame_buffer_pool.cc',
'vp9_frame_buffer_pool.h',
'vp9_impl.cc',
--- a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
@@ -11,35 +11,35 @@
#include "webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h"
#include "vpx/vpx_codec.h"
#include "vpx/vpx_decoder.h"
#include "vpx/vpx_frame_buffer.h"
#include "webrtc/base/checks.h"
-#include "webrtc/system_wrappers/include/logging.h"
+#include "webrtc/system_wrappers/interface/logging.h"
namespace webrtc {
uint8_t* Vp9FrameBufferPool::Vp9FrameBuffer::GetData() {
- return data_.data<uint8_t>();
+ return (uint8_t*)(data_.data()); //data<uint8_t>();
}
size_t Vp9FrameBufferPool::Vp9FrameBuffer::GetDataSize() const {
return data_.size();
}
void Vp9FrameBufferPool::Vp9FrameBuffer::SetSize(size_t size) {
data_.SetSize(size);
}
bool Vp9FrameBufferPool::InitializeVpxUsePool(
vpx_codec_ctx* vpx_codec_context) {
- RTC_DCHECK(vpx_codec_context);
+ DCHECK(vpx_codec_context);
// Tell libvpx to use this pool.
if (vpx_codec_set_frame_buffer_functions(
// In which context to use these callback functions.
vpx_codec_context,
// Called by libvpx when it needs another frame buffer.
&Vp9FrameBufferPool::VpxGetFrameBuffer,
// Called by libvpx when it no longer uses a frame buffer.
&Vp9FrameBufferPool::VpxReleaseFrameBuffer,
@@ -48,17 +48,17 @@ bool Vp9FrameBufferPool::InitializeVpxUs
// Failed to configure libvpx to use Vp9FrameBufferPool.
return false;
}
return true;
}
rtc::scoped_refptr<Vp9FrameBufferPool::Vp9FrameBuffer>
Vp9FrameBufferPool::GetFrameBuffer(size_t min_size) {
- RTC_DCHECK_GT(min_size, 0u);
+ DCHECK_GT(min_size, 0u);
rtc::scoped_refptr<Vp9FrameBuffer> available_buffer = nullptr;
{
rtc::CritScope cs(&buffers_lock_);
// Do we have a buffer we can recycle?
for (const auto& buffer : allocated_buffers_) {
if (buffer->HasOneRef()) {
available_buffer = buffer;
break;
@@ -96,18 +96,18 @@ void Vp9FrameBufferPool::ClearPool() {
rtc::CritScope cs(&buffers_lock_);
allocated_buffers_.clear();
}
// static
int32_t Vp9FrameBufferPool::VpxGetFrameBuffer(void* user_priv,
size_t min_size,
vpx_codec_frame_buffer* fb) {
- RTC_DCHECK(user_priv);
- RTC_DCHECK(fb);
+ DCHECK(user_priv);
+ DCHECK(fb);
Vp9FrameBufferPool* pool = static_cast<Vp9FrameBufferPool*>(user_priv);
rtc::scoped_refptr<Vp9FrameBuffer> buffer = pool->GetFrameBuffer(min_size);
fb->data = buffer->GetData();
fb->size = buffer->GetDataSize();
// Store Vp9FrameBuffer* in |priv| for use in VpxReleaseFrameBuffer.
// This also makes vpx_codec_get_frame return images with their |fb_priv| set
// to |buffer| which is important for external reference counting.
@@ -115,18 +115,18 @@ int32_t Vp9FrameBufferPool::VpxGetFrameB
// |buffer| goes out of scope.
fb->priv = static_cast<void*>(buffer.release());
return 0;
}
// static
int32_t Vp9FrameBufferPool::VpxReleaseFrameBuffer(void* user_priv,
vpx_codec_frame_buffer* fb) {
- RTC_DCHECK(user_priv);
- RTC_DCHECK(fb);
+ DCHECK(user_priv);
+ DCHECK(fb);
Vp9FrameBuffer* buffer = static_cast<Vp9FrameBuffer*>(fb->priv);
if (buffer != nullptr) {
buffer->Release();
// When libvpx fails to decode and you continue to try to decode (and fail)
// libvpx can for some reason try to release the same buffer multiple times.
// Setting |priv| to null protects against trying to Release multiple times.
fb->priv = nullptr;
}
--- a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
@@ -21,20 +21,20 @@
#include "vpx/vp8cx.h"
#include "vpx/vp8dx.h"
#include "webrtc/base/bind.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/trace_event.h"
#include "webrtc/common.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h"
-#include "webrtc/system_wrappers/include/logging.h"
-#include "webrtc/system_wrappers/include/tick_util.h"
+#include "webrtc/system_wrappers/interface/logging.h"
+#include "webrtc/system_wrappers/interface/tick_util.h"
namespace {
// VP9DecoderImpl::ReturnFrame helper function used with WrappedI420Buffer.
static void WrappedI420BufferNoLongerUsedCb(
webrtc::Vp9FrameBufferPool::Vp9FrameBuffer* img_buffer) {
img_buffer->Release();
}
@@ -114,40 +114,46 @@ int VP9EncoderImpl::Release() {
}
inited_ = false;
return WEBRTC_VIDEO_CODEC_OK;
}
bool VP9EncoderImpl::ExplicitlyConfiguredSpatialLayers() const {
// We check target_bitrate_bps of the 0th layer to see if the spatial layers
// (i.e. bitrates) were explicitly configured.
+#ifdef LIBVPX_SVC
return num_spatial_layers_ > 1 &&
codec_.spatialLayers[0].target_bitrate_bps > 0;
+#else
+ return false;
+#endif
}
bool VP9EncoderImpl::SetSvcRates() {
uint8_t i = 0;
if (ExplicitlyConfiguredSpatialLayers()) {
+#ifdef LIBVPX_SVC
if (num_temporal_layers_ > 1) {
LOG(LS_ERROR) << "Multiple temporal layers when manually specifying "
"spatial layers not implemented yet!";
return false;
}
int total_bitrate_bps = 0;
for (i = 0; i < num_spatial_layers_; ++i)
total_bitrate_bps += codec_.spatialLayers[i].target_bitrate_bps;
// If total bitrate differs now from what has been specified at the
// beginning, update the bitrates in the same ratio as before.
for (i = 0; i < num_spatial_layers_; ++i) {
config_->ss_target_bitrate[i] = config_->layer_target_bitrate[i] =
static_cast<int>(static_cast<int64_t>(config_->rc_target_bitrate) *
codec_.spatialLayers[i].target_bitrate_bps /
total_bitrate_bps);
}
+#endif
} else {
float rate_ratio[VPX_MAX_LAYERS] = {0};
float total = 0;
for (i = 0; i < num_spatial_layers_; ++i) {
if (svc_internal_.svc_params.scaling_factor_num[i] <= 0 ||
svc_internal_.svc_params.scaling_factor_den[i] <= 0) {
LOG(LS_ERROR) << "Scaling factors not specified!";
@@ -387,23 +393,25 @@ int VP9EncoderImpl::NumberOfThreads(int
return 1;
}
}
int VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) {
config_->ss_number_layers = num_spatial_layers_;
if (ExplicitlyConfiguredSpatialLayers()) {
+#ifdef LIBVPX_SVC
for (int i = 0; i < num_spatial_layers_; ++i) {
const auto& layer = codec_.spatialLayers[i];
svc_internal_.svc_params.max_quantizers[i] = config_->rc_max_quantizer;
svc_internal_.svc_params.min_quantizers[i] = config_->rc_min_quantizer;
svc_internal_.svc_params.scaling_factor_num[i] = layer.scaling_factor_num;
svc_internal_.svc_params.scaling_factor_den[i] = layer.scaling_factor_den;
}
+#endif
} else {
int scaling_factor_num = 256;
for (int i = num_spatial_layers_ - 1; i >= 0; --i) {
svc_internal_.svc_params.max_quantizers[i] = config_->rc_max_quantizer;
svc_internal_.svc_params.min_quantizers[i] = config_->rc_min_quantizer;
// 1:2 scaling in each dimension.
svc_internal_.svc_params.scaling_factor_num[i] = scaling_factor_num;
svc_internal_.svc_params.scaling_factor_den[i] = 256;
@@ -468,35 +476,35 @@ uint32_t VP9EncoderImpl::MaxIntraTarget(
float scale_par = 0.5;
uint32_t target_pct =
optimal_buffer_size * scale_par * codec_.maxFramerate / 10;
// Don't go below 3 times the per frame bandwidth.
const uint32_t min_intra_size = 300;
return (target_pct < min_intra_size) ? min_intra_size: target_pct;
}
-int VP9EncoderImpl::Encode(const VideoFrame& input_image,
+int VP9EncoderImpl::Encode(const I420VideoFrame& input_image,
const CodecSpecificInfo* codec_specific_info,
- const std::vector<FrameType>* frame_types) {
+ const std::vector<VideoFrameType>* frame_types) {
if (!inited_) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
if (input_image.IsZeroSize()) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
if (encoded_complete_callback_ == NULL) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
- FrameType frame_type = kVideoFrameDelta;
+ VideoFrameType frame_type = kDeltaFrame;
// We only support one stream at the moment.
if (frame_types && frame_types->size() > 0) {
frame_type = (*frame_types)[0];
}
- RTC_DCHECK_EQ(input_image.width(), static_cast<int>(raw_->d_w));
- RTC_DCHECK_EQ(input_image.height(), static_cast<int>(raw_->d_h));
+ DCHECK_EQ(input_image.width(), static_cast<int>(raw_->d_w));
+ DCHECK_EQ(input_image.height(), static_cast<int>(raw_->d_h));
// Set input image for use in the callback.
// This was necessary since you need some information from input_image.
// You can save only the necessary information (such as timestamp) instead of
// doing this.
input_image_ = &input_image;
// Image in vpx_image_t format.
@@ -504,22 +512,23 @@ int VP9EncoderImpl::Encode(const VideoFr
raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(input_image.buffer(kYPlane));
raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(input_image.buffer(kUPlane));
raw_->planes[VPX_PLANE_V] = const_cast<uint8_t*>(input_image.buffer(kVPlane));
raw_->stride[VPX_PLANE_Y] = input_image.stride(kYPlane);
raw_->stride[VPX_PLANE_U] = input_image.stride(kUPlane);
raw_->stride[VPX_PLANE_V] = input_image.stride(kVPlane);
vpx_enc_frame_flags_t flags = 0;
- bool send_keyframe = (frame_type == kVideoFrameKey);
+ bool send_keyframe = (frame_type == kKeyFrame);
if (send_keyframe) {
// Key frame request from caller.
flags = VPX_EFLAG_FORCE_KF;
}
+#ifdef LIBVPX_SVC
if (is_flexible_mode_) {
SuperFrameRefSettings settings;
// These structs are copied when calling vpx_codec_control,
// therefore it is ok for them to go out of scope.
vpx_svc_ref_frame_config enc_layer_conf;
vpx_svc_layer_id layer_id;
@@ -531,16 +540,17 @@ int VP9EncoderImpl::Encode(const VideoFr
send_keyframe);
}
enc_layer_conf = GenerateRefsAndFlags(settings);
layer_id.temporal_layer_id = 0;
layer_id.spatial_layer_id = settings.start_layer;
vpx_codec_control(encoder_, VP9E_SET_SVC_LAYER_ID, &layer_id);
vpx_codec_control(encoder_, VP9E_SET_SVC_REF_FRAME_CONFIG, &enc_layer_conf);
}
+#endif
assert(codec_.maxFramerate > 0);
uint32_t duration = 90000 / codec_.maxFramerate;
if (vpx_codec_encode(encoder_, raw_, timestamp_, duration, flags,
VPX_DL_REALTIME)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
timestamp_ += duration;
@@ -648,17 +658,17 @@ void VP9EncoderImpl::PopulateCodecSpecif
if (!vp9_info->flexible_mode) {
vp9_info->gof.CopyGofInfoVP9(gof_);
}
}
}
int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
encoded_image_._length = 0;
- encoded_image_._frameType = kVideoFrameDelta;
+ encoded_image_._frameType = kDeltaFrame;
RTPFragmentationHeader frag_info;
// Note: no data partitioning in VP9, so 1 partition only. We keep this
// fragmentation data for now, until VP9 packetizer is implemented.
frag_info.VerifyAndAllocateFragmentationHeader(1);
int part_idx = 0;
CodecSpecificInfo codec_specific;
assert(pkt->kind == VPX_CODEC_CX_FRAME_PKT);
@@ -678,32 +688,33 @@ int VP9EncoderImpl::GetEncodedLayerFrame
static_cast<unsigned int>(encoded_image_._length),
layer_id.spatial_layer_id);
assert(encoded_image_._length <= encoded_image_._size);
// End of frame.
// Check if encoded frame is a key frame.
if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
- encoded_image_._frameType = kVideoFrameKey;
+ encoded_image_._frameType = kKeyFrame;
}
PopulateCodecSpecific(&codec_specific, *pkt, input_image_->timestamp());
if (encoded_image_._length > 0) {
TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_._length);
encoded_image_._timeStamp = input_image_->timestamp();
encoded_image_.capture_time_ms_ = input_image_->render_time_ms();
encoded_image_._encodedHeight = raw_->d_h;
encoded_image_._encodedWidth = raw_->d_w;
encoded_complete_callback_->Encoded(encoded_image_, &codec_specific,
&frag_info);
}
return WEBRTC_VIDEO_CODEC_OK;
}
+#ifdef LIBVPX_SVC
vpx_svc_ref_frame_config VP9EncoderImpl::GenerateRefsAndFlags(
const SuperFrameRefSettings& settings) {
static const vpx_enc_frame_flags_t kAllFlags =
VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_LAST |
VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_GF;
vpx_svc_ref_frame_config sf_conf = {};
if (settings.is_keyframe) {
// Used later on to make sure we don't make any invalid references.
@@ -724,18 +735,18 @@ vpx_svc_ref_frame_config VP9EncoderImpl:
int8_t refs[3] = {settings.layer[layer_idx].ref_buf1,
settings.layer[layer_idx].ref_buf2,
settings.layer[layer_idx].ref_buf3};
for (unsigned int ref_idx = 0; ref_idx < kMaxVp9RefPics; ++ref_idx) {
if (refs[ref_idx] == -1)
continue;
- RTC_DCHECK_GE(refs[ref_idx], 0);
- RTC_DCHECK_LE(refs[ref_idx], 7);
+ DCHECK_GE(refs[ref_idx], 0);
+ DCHECK_LE(refs[ref_idx], 7);
// Easier to remove flags from all flags rather than having to
// build the flags from 0.
switch (num_ref_pics_[layer_idx]) {
case 0: {
sf_conf.lst_fb_idx[layer_idx] = refs[ref_idx];
layer_flags &= ~VP8_EFLAG_NO_REF_LAST;
break;
}
@@ -747,17 +758,17 @@ vpx_svc_ref_frame_config VP9EncoderImpl:
case 2: {
sf_conf.alt_fb_idx[layer_idx] = refs[ref_idx];
layer_flags &= ~VP8_EFLAG_NO_REF_ARF;
break;
}
}
// Make sure we don't reference a buffer that hasn't been
// used at all or hasn't been used since a keyframe.
- RTC_DCHECK_NE(buffer_updated_at_frame_[refs[ref_idx]], -1);
+ DCHECK_NE(buffer_updated_at_frame_[refs[ref_idx]], -1);
p_diff_[layer_idx][num_ref_pics_[layer_idx]] =
frames_encoded_ - buffer_updated_at_frame_[refs[ref_idx]];
num_ref_pics_[layer_idx]++;
}
bool upd_buf_same_as_a_ref = false;
if (settings.layer[layer_idx].upd_buf != -1) {
@@ -795,16 +806,17 @@ vpx_svc_ref_frame_config VP9EncoderImpl:
buffer_updated_at_frame_[updated_buffer] = frames_encoded_;
sf_conf.frame_flags[layer_idx] = layer_flags;
}
}
}
++frames_encoded_;
return sf_conf;
}
+#endif
int VP9EncoderImpl::SetChannelParameters(uint32_t packet_loss, int64_t rtt) {
return WEBRTC_VIDEO_CODEC_OK;
}
int VP9EncoderImpl::RegisterEncodeCompleteCallback(
EncodedImageCallback* callback) {
encoded_complete_callback_ = callback;
@@ -886,17 +898,17 @@ int VP9DecoderImpl::Decode(const Encoded
if (!inited_) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
if (decode_complete_callback_ == NULL) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
// Always start with a complete key frame.
if (key_frame_required_) {
- if (input_image._frameType != kVideoFrameKey)
+ if (input_image._frameType != kKeyFrame)
return WEBRTC_VIDEO_CODEC_ERROR;
// We have a key frame - is it complete?
if (input_image._completeFrame) {
key_frame_required_ = false;
} else {
return WEBRTC_VIDEO_CODEC_ERROR;
}
}
@@ -927,40 +939,52 @@ int VP9DecoderImpl::Decode(const Encoded
}
int VP9DecoderImpl::ReturnFrame(const vpx_image_t* img, uint32_t timestamp) {
if (img == NULL) {
// Decoder OK and NULL image => No show frame.
return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
}
+#ifdef USE_WRAPPED_I420_BUFFER
// This buffer contains all of |img|'s image data, a reference counted
// Vp9FrameBuffer. Performing AddRef/Release ensures it is not released and
// recycled during use (libvpx is done with the buffers after a few
// vpx_codec_decode calls or vpx_codec_destroy).
Vp9FrameBufferPool::Vp9FrameBuffer* img_buffer =
static_cast<Vp9FrameBufferPool::Vp9FrameBuffer*>(img->fb_priv);
img_buffer->AddRef();
// The buffer can be used directly by the VideoFrame (without copy) by
// using a WrappedI420Buffer.
rtc::scoped_refptr<WrappedI420Buffer> img_wrapped_buffer(
new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
img->d_w, img->d_h,
+ img->d_w, img->d_h,
img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
// WrappedI420Buffer's mechanism for allowing the release of its frame
// buffer is through a callback function. This is where we should
// release |img_buffer|.
rtc::Bind(&WrappedI420BufferNoLongerUsedCb, img_buffer)));
- VideoFrame decoded_image;
- decoded_image.set_video_frame_buffer(img_wrapped_buffer);
- decoded_image.set_timestamp(timestamp);
- int ret = decode_complete_callback_->Decoded(decoded_image);
+ I420VideoFrame decoded_image_;
+ decoded_image_.set_video_frame_buffer(img_wrapped_buffer);
+#else
+ decoded_image_.CreateFrame(img->planes[VPX_PLANE_Y],
+ img->planes[VPX_PLANE_U],
+ img->planes[VPX_PLANE_V],
+ img->d_w, img->d_h,
+ img->stride[VPX_PLANE_Y],
+ img->stride[VPX_PLANE_U],
+ img->stride[VPX_PLANE_V]);
+#endif
+ decoded_image_.set_timestamp(timestamp);
+
+ int ret = decode_complete_callback_->Decoded(decoded_image_);
if (ret != 0)
return ret;
return WEBRTC_VIDEO_CODEC_OK;
}
int VP9DecoderImpl::RegisterDecodeCompleteCallback(
DecodedImageCallback* callback) {
decode_complete_callback_ = callback;
--- a/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
@@ -30,28 +30,26 @@ class VP9EncoderImpl : public VP9Encoder
virtual ~VP9EncoderImpl();
int Release() override;
int InitEncode(const VideoCodec* codec_settings,
int number_of_cores,
size_t max_payload_size) override;
- int Encode(const VideoFrame& input_image,
+ int Encode(const I420VideoFrame& input_image,
const CodecSpecificInfo* codec_specific_info,
- const std::vector<FrameType>* frame_types) override;
+ const std::vector<VideoFrameType>* frame_types) override;
int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
int SetChannelParameters(uint32_t packet_loss, int64_t rtt) override;
int SetRates(uint32_t new_bitrate_kbit, uint32_t frame_rate) override;
- void OnDroppedFrame() override {}
-
struct LayerFrameRefSettings {
int8_t upd_buf = -1; // -1 - no update, 0..7 - update buffer 0..7
int8_t ref_buf1 = -1; // -1 - no reference, 0..7 - reference buffer 0..7
int8_t ref_buf2 = -1; // -1 - no reference, 0..7 - reference buffer 0..7
int8_t ref_buf3 = -1; // -1 - no reference, 0..7 - reference buffer 0..7
};
struct SuperFrameRefSettings {
@@ -70,25 +68,27 @@ class VP9EncoderImpl : public VP9Encoder
void PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
const vpx_codec_cx_pkt& pkt,
uint32_t timestamp);
bool ExplicitlyConfiguredSpatialLayers() const;
bool SetSvcRates();
+#ifdef LIBVPX_SVC
// Used for flexible mode to set the flags and buffer references used
// by the encoder. Also calculates the references used by the RTP
// packetizer.
//
// Has to be called for every frame (keyframes included) to update the
// state used to calculate references.
vpx_svc_ref_frame_config GenerateRefsAndFlags(
const SuperFrameRefSettings& settings);
-
+#endif
+
virtual int GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt);
// Callback function for outputting packets per spatial layer.
static void EncoderOutputCodedPacketCallback(vpx_codec_cx_pkt* pkt,
void* user_data);
// Determine maximum target for Intra frames
//
@@ -105,17 +105,17 @@ class VP9EncoderImpl : public VP9Encoder
int64_t timestamp_;
uint16_t picture_id_;
int cpu_speed_;
uint32_t rc_max_intra_target_;
vpx_codec_ctx_t* encoder_;
vpx_codec_enc_cfg_t* config_;
vpx_image_t* raw_;
SvcInternal_t svc_internal_;
- const VideoFrame* input_image_;
+ const I420VideoFrame* input_image_;
GofInfoVP9 gof_; // Contains each frame's temporal information for
// non-flexible mode.
uint8_t tl0_pic_idx_; // Only used in non-flexible mode.
size_t frames_since_kf_;
uint8_t num_temporal_layers_;
uint8_t num_spatial_layers_;
// Used for flexible mode.
@@ -146,16 +146,21 @@ class VP9DecoderImpl : public VP9Decoder
int Release() override;
int Reset() override;
private:
int ReturnFrame(const vpx_image_t* img, uint32_t timeStamp);
+#ifndef USE_WRAPPED_I420_BUFFER
+ // Temporarily keep VideoFrame in a separate buffer
+ // Once we debug WrappedI420VideoFrame usage, we can get rid of this
+ I420VideoFrame decoded_image_;
+#endif
// Memory pool used to share buffers between libvpx and webrtc.
Vp9FrameBufferPool frame_buffer_pool_;
DecodedImageCallback* decode_complete_callback_;
bool inited_;
vpx_codec_ctx_t* decoder_;
VideoCodec codec_;
bool key_frame_required_;
};
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/frame_buffer.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/frame_buffer.cc
@@ -70,16 +70,25 @@ bool VCMFrameBuffer::LayerSync() const {
int VCMFrameBuffer::Tl0PicId() const {
return _sessionInfo.Tl0PicId();
}
bool VCMFrameBuffer::NonReference() const {
return _sessionInfo.NonReference();
}
+void VCMFrameBuffer::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) {
+ _sessionInfo.SetGofInfo(gof_info, idx);
+ // TODO(asapersson): Consider adding hdr->VP9.ref_picture_id for testing.
+ _codecSpecificInfo.codecSpecific.VP9.temporal_idx =
+ gof_info.temporal_idx[idx];
+ _codecSpecificInfo.codecSpecific.VP9.temporal_up_switch =
+ gof_info.temporal_up_switch[idx];
+}
+
bool
VCMFrameBuffer::IsSessionComplete() const {
return _sessionInfo.complete();
}
// Insert packet
VCMFrameBufferEnum
VCMFrameBuffer::InsertPacket(const VCMPacket& packet,
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_buffer.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_buffer.cc
@@ -11,17 +11,17 @@
#include <assert.h>
#include <algorithm>
#include <utility>
#include "webrtc/base/checks.h"
#include "webrtc/base/trace_event.h"
-#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
#include "webrtc/modules/video_coding/main/interface/video_coding.h"
#include "webrtc/modules/video_coding/main/source/frame_buffer.h"
#include "webrtc/modules/video_coding/main/source/inter_frame_delay.h"
#include "webrtc/modules/video_coding/main/source/internal_defines.h"
#include "webrtc/modules/video_coding/main/source/jitter_buffer_common.h"
#include "webrtc/modules/video_coding/main/source/jitter_estimator.h"
#include "webrtc/modules/video_coding/main/source/packet.h"
#include "webrtc/system_wrappers/interface/clock.h"
@@ -190,17 +190,17 @@ bool Vp9SsMap::TimeForCleanup(uint32_t t
if (ss_map_.empty() || !IsNewerTimestamp(timestamp, ss_map_.begin()->first))
return false;
uint32_t diff = timestamp - ss_map_.begin()->first;
return diff / kVideoPayloadTypeFrequency >= kSsCleanupIntervalSec;
}
void Vp9SsMap::AdvanceFront(uint32_t timestamp) {
- RTC_DCHECK(!ss_map_.empty());
+ DCHECK(!ss_map_.empty());
GofInfoVP9 gof = ss_map_.begin()->second;
ss_map_.erase(ss_map_.begin());
ss_map_[timestamp] = gof;
}
// TODO(asapersson): Update according to updates in RTP payload profile.
bool Vp9SsMap::UpdatePacket(VCMPacket* packet) {
uint8_t gof_idx = packet->codecSpecificHeader.codecHeader.VP9.gof_idx;
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_buffer.h
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/jitter_buffer.h
@@ -70,16 +70,47 @@ class FrameList
VCMFrameBuffer* Back() const;
int RecycleFramesUntilKeyFrame(FrameList::iterator* key_frame_it,
UnorderedFrameList* free_frames);
void CleanUpOldOrEmptyFrames(VCMDecodingState* decoding_state,
UnorderedFrameList* free_frames);
void Reset(UnorderedFrameList* free_frames);
};
+class Vp9SsMap {
+ public:
+ typedef std::map<uint32_t, GofInfoVP9, TimestampLessThan> SsMap;
+ bool Insert(const VCMPacket& packet);
+ void Reset();
+
+ // Removes SS data that are older than |timestamp|.
+ // The |timestamp| should be an old timestamp, i.e. packets with older
+ // timestamps should no longer be inserted.
+ void RemoveOld(uint32_t timestamp);
+
+ bool UpdatePacket(VCMPacket* packet);
+ void UpdateFrames(FrameList* frames);
+
+ // Public for testing.
+ // Returns an iterator to the corresponding SS data for the input |timestamp|.
+ bool Find(uint32_t timestamp, SsMap::iterator* it);
+
+ private:
+ // These two functions are called by RemoveOld.
+ // Checks if it is time to do a clean up (done each kSsCleanupIntervalSec).
+ bool TimeForCleanup(uint32_t timestamp) const;
+
+ // Advances the oldest SS data to handle timestamp wrap in cases where SS data
+ // are received very seldom (e.g. only once in beginning, second when
+ // IsNewerTimestamp is not true).
+ void AdvanceFront(uint32_t timestamp);
+
+ SsMap ss_map_;
+};
+
class VCMJitterBuffer {
public:
VCMJitterBuffer(Clock* clock,
EventFactory* event_factory);
virtual ~VCMJitterBuffer();
// Initializes and starts jitter buffer.
void Start();
@@ -210,16 +241,22 @@ class VCMJitterBuffer {
// decodable frames into account.
bool IsContinuousInState(const VCMFrameBuffer& frame,
const VCMDecodingState& decoding_state) const
EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
// Returns true if |frame| is continuous in the |last_decoded_state_|, taking
// all decodable frames into account.
bool IsContinuous(const VCMFrameBuffer& frame) const
EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+ // Looks for frames in |incomplete_frames_| which are continuous in the
+ // provided |decoded_state|. Starts the search from the timestamp of
+ // |decoded_state|.
+ void FindAndInsertContinuousFramesWithState(
+ const VCMDecodingState& decoded_state)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
// Looks for frames in |incomplete_frames_| which are continuous in
// |last_decoded_state_| taking all decodable frames into account. Starts
// the search from |new_frame|.
void FindAndInsertContinuousFrames(const VCMFrameBuffer& new_frame)
EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
VCMFrameBuffer* NextFrame() const EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
// Returns true if the NACK list was updated to cover sequence numbers up to
// |sequence_number|. If false a key frame is needed to get into a state where
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/session_info.cc
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/session_info.cc
@@ -54,50 +54,89 @@ int VCMSessionInfo::HighSequenceNumber()
if (packets_.empty())
return empty_seq_num_high_;
if (empty_seq_num_high_ == -1)
return packets_.back().seqNum;
return LatestSequenceNumber(packets_.back().seqNum, empty_seq_num_high_);
}
int VCMSessionInfo::PictureId() const {
- if (packets_.empty() ||
- packets_.front().codecSpecificHeader.codec != kRtpVideoVp8)
+ if (packets_.empty())
return kNoPictureId;
- return packets_.front().codecSpecificHeader.codecHeader.VP8.pictureId;
+ if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
+ return packets_.front().codecSpecificHeader.codecHeader.VP8.pictureId;
+ } else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
+ return packets_.front().codecSpecificHeader.codecHeader.VP9.picture_id;
+ } else {
+ return kNoPictureId;
+ }
}
int VCMSessionInfo::TemporalId() const {
- if (packets_.empty() ||
- packets_.front().codecSpecificHeader.codec != kRtpVideoVp8)
+ if (packets_.empty())
return kNoTemporalIdx;
- return packets_.front().codecSpecificHeader.codecHeader.VP8.temporalIdx;
+ if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
+ return packets_.front().codecSpecificHeader.codecHeader.VP8.temporalIdx;
+ } else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
+ return packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_idx;
+ } else {
+ return kNoTemporalIdx;
+ }
}
bool VCMSessionInfo::LayerSync() const {
- if (packets_.empty() ||
- packets_.front().codecSpecificHeader.codec != kRtpVideoVp8)
+ if (packets_.empty())
return false;
- return packets_.front().codecSpecificHeader.codecHeader.VP8.layerSync;
+ if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
+ return packets_.front().codecSpecificHeader.codecHeader.VP8.layerSync;
+ } else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
+ return
+ packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_up_switch;
+ } else {
+ return false;
+ }
}
int VCMSessionInfo::Tl0PicId() const {
- if (packets_.empty() ||
- packets_.front().codecSpecificHeader.codec != kRtpVideoVp8)
+ if (packets_.empty())
return kNoTl0PicIdx;
- return packets_.front().codecSpecificHeader.codecHeader.VP8.tl0PicIdx;
+ if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
+ return packets_.front().codecSpecificHeader.codecHeader.VP8.tl0PicIdx;
+ } else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
+ return packets_.front().codecSpecificHeader.codecHeader.VP9.tl0_pic_idx;
+ } else {
+ return kNoTl0PicIdx;
+ }
}
bool VCMSessionInfo::NonReference() const {
if (packets_.empty() ||
packets_.front().codecSpecificHeader.codec != kRtpVideoVp8)
return false;
return packets_.front().codecSpecificHeader.codecHeader.VP8.nonReference;
}
+void VCMSessionInfo::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) {
+ if (packets_.empty() ||
+ packets_.front().codecSpecificHeader.codec != kRtpVideoVp9 ||
+ packets_.front().codecSpecificHeader.codecHeader.VP9.flexible_mode) {
+ return;
+ }
+ packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_idx =
+ gof_info.temporal_idx[idx];
+ packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_up_switch =
+ gof_info.temporal_up_switch[idx];
+ packets_.front().codecSpecificHeader.codecHeader.VP9.num_ref_pics =
+ gof_info.num_ref_pics[idx];
+ for (uint8_t i = 0; i < gof_info.num_ref_pics[idx]; ++i) {
+ packets_.front().codecSpecificHeader.codecHeader.VP9.pid_diff[i] =
+ gof_info.pid_diff[idx][i];
+ }
+}
+
void VCMSessionInfo::Reset() {
session_nack_ = false;
complete_ = false;
decodable_ = false;
frame_type_ = kVideoFrameDelta;
packets_.clear();
empty_seq_num_low_ = -1;
empty_seq_num_high_ = -1;
--- a/media/webrtc/trunk/webrtc/modules/video_coding/main/source/session_info.h
+++ b/media/webrtc/trunk/webrtc/modules/video_coding/main/source/session_info.h
@@ -83,16 +83,18 @@ class VCMSessionInfo {
// Returns highest sequence number, media or empty.
int HighSequenceNumber() const;
int PictureId() const;
int TemporalId() const;
bool LayerSync() const;
int Tl0PicId() const;
bool NonReference() const;
+ void SetGofInfo(const GofInfoVP9& gof_info, size_t idx);
+
// The number of packets discarded because the decoder can't make use of
// them.
int packets_not_decodable() const;
private:
enum { kMaxVP8Partitions = 9 };
typedef std::list<VCMPacket> PacketList;