/*
* Copyright 2021 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
// As per https://www.kernel.org/doc/html/v5.4/media/uapi/v4l/dev-decoder.html
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <limits.h>
#include <linux/videodev2.h>
#include <openssl/md5.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "bitstreams/bitstream_helper.h"
#include "bs_drm.h"
#include "v4l2_macros.h"
static const char* kDecodeDevice = "/dev/video-dec0";
static const int kInputbufferMaxSize = 4 * 1024 * 1024;
static const int kRequestBufferCount = 8;
static const uint32_t kInvalidFrameRate = 0;
// |kMaxRetryCount = 2^24| takes around 20 seconds to exhaust all retries on
// Trogdor when a decode stalls.
static const int kMaxRetryCount = 1 << 24;
static const char* kImageProcessorDevice = "/dev/image-proc0";
static const int kInvalidBufferIndex = -1;
static uint32_t kPreferredUncompressedFourCCs[] = {V4L2_PIX_FMT_NV12,
V4L2_PIX_FMT_MT21C};
static int log_run_level = DEFAULT_LOG_LEVEL;
struct mmap_buffers {
void* start[VIDEO_MAX_PLANES];
size_t length[VIDEO_MAX_PLANES];
struct gbm_bo* bo;
};
struct queue {
int v4lfd;
enum v4l2_buf_type type;
uint32_t fourcc;
struct mmap_buffers* buffers;
// |display_area| describes the bounds of the displayable image relative to
// the image buffer. |left| and |top| are the offset from the top left corner
// in pixels. |width| and |height| describe the displayable image's width and
// height in pixels.
struct v4l2_rect display_area;
// |coded_width| x |coded_height|:
// The size of the encoded frame.
// Usually aligned to 16 or 32 pixels, depending on the codec.
uint32_t coded_width;
uint32_t coded_height;
// Contains the number of bytes in every row of video (line_stride) in each
// plane of an MMAP buffer. Copied from the V4L2 multi-planar format
// structure.
uint32_t bytes_per_line[VIDEO_MAX_PLANES];
uint32_t cnt;
uint32_t num_planes;
enum v4l2_memory memory; // V4L2_MEMORY_(MMAP|DMABUF) etc.
uint32_t displayed_frames; // Not valid for OUTPUT queues.
// Used to track the state of OUTPUT and CAPTURE queues. The CAPTURE queue
// stops streaming when it dequeues a buffer with V4L2_BUF_FLAG_LAST set.
// The OUTPUT queue should stop streaming when there are no remaining
// compressed frames to enqueue to it.
bool is_streaming;
};
struct md5_hash {
uint8_t bytes[16];
};
struct data_buffer {
uint8_t* data;
size_t size;
};
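// Returns true for any VP9 FourCC version; e.g. is_vp9(V4L2_PIX_FMT_VP9)
// ("VP90") is true, while is_vp9(V4L2_PIX_FMT_VP8) is not.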
bool is_vp9(uint32_t fourcc) {
// VP9 FourCC should match VP9X where 'X' is the version number. This check
// ignores the last character, so VP90, VP91, etc. match.
return (fourcc & 0xff) == 'V' &&
(fourcc >> 8 & 0xff) == 'P' &&
(fourcc >> 16 & 0xff) == '9';
}
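// Writes the four printable characters of |fourcc| into |fourcc_string|,
// which must hold at least FOURCC_SIZE + 1 bytes; e.g. V4L2_PIX_FMT_NV12
// becomes "NV12".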
char* fourcc_to_string(uint32_t fourcc, char* fourcc_string) {
sprintf(fourcc_string, "%c%c%c%c", fourcc & 0xff, fourcc >> 8 & 0xff,
fourcc >> 16 & 0xff, fourcc >> 24 & 0xff);
// Return the pointer for convenience.
return fourcc_string;
}
// Verifies that |v4lfd| can be queried for capabilities, and prints them. If
// the ioctl() fails, a FATAL error is logged and the program terminates.
void query_driver(int v4lfd) {
struct v4l2_capability cap = {};
const int ret = ioctl(v4lfd, VIDIOC_QUERYCAP, &cap);
if (ret != 0)
LOG_FATAL("VIDIOC_QUERYCAP failed: %s.", strerror(errno));
LOG_INFO("Driver=\"%s\" bus_info=\"%s\" card=\"%s\" fd=0x%x", cap.driver,
cap.bus_info, cap.card, v4lfd);
}
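// Returns true iff the driver enumerates |fourcc| for queue |type| via
// VIDIOC_ENUM_FMT. Enumeration ends when the driver rejects an index
// (typically with EINVAL).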
bool is_fourcc_supported(int v4lfd, enum v4l2_buf_type type, uint32_t fourcc) {
struct v4l2_fmtdesc fmtdesc = { .type = type };
while (ioctl(v4lfd, VIDIOC_ENUM_FMT, &fmtdesc) == 0) {
if (fourcc == fmtdesc.pixelformat)
return true;
fmtdesc.index++;
}
return false;
}
// Verifies that |fd| supports |compressed_format| and, if specified,
// |uncompressed_format|. If |uncompressed_format| is not specified, i.e. it's
// equal to V4L2_PIX_FMT_INVALID, then kPreferredUncompressedFourCCs are tried
// instead and the first supported one is returned in |uncompressed_format|. If
// any of these checks fails, the function returns false.
bool capabilities(int fd,
const char* fd_name,
uint32_t compressed_format,
uint32_t* uncompressed_format) {
if (!is_fourcc_supported(fd, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
compressed_format)) {
char fourcc_str[FOURCC_SIZE + 1];
LOG_ERROR("%s is not supported for OUTPUT, try running `v4l2-ctl "
"--list-formats-out-ext -d %s` for more info",
fourcc_to_string(compressed_format, fourcc_str), fd_name);
return false;
}
if (*uncompressed_format != V4L2_PIX_FMT_INVALID) {
if (!is_fourcc_supported(fd, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
*uncompressed_format)) {
char fourcc_str[FOURCC_SIZE + 1];
LOG_ERROR("%s is not supported for CAPTURE, try running `v4l2-ctl "
"--list-formats-ext -d %s` for more info",
fourcc_to_string(*uncompressed_format, fourcc_str), fd_name);
return false;
}
} else {
const size_t num_fourccs =
sizeof(kPreferredUncompressedFourCCs) / sizeof(uint32_t);
for (size_t i = 0; i < num_fourccs; ++i) {
if (is_fourcc_supported(fd, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
kPreferredUncompressedFourCCs[i])) {
*uncompressed_format = kPreferredUncompressedFourCCs[i];
return true;
}
}
return false;
}
return true;
}
// The V4L2 device may emit events outside of the buffer data path. For event
// types, see https://www.kernel.org/doc/html/v5.4/media/uapi/v4l/vidioc-dqevent.html#id2
int subscribe_to_event(int v4lfd, uint32_t event_type, uint32_t id) {
struct v4l2_event_subscription sub = {.type = event_type,
.id = id,
.flags = 0};
const int ret = ioctl(v4lfd, VIDIOC_SUBSCRIBE_EVENT, &sub);
if (ret != 0)
LOG_ERROR("VIDIOC_SUBSCRIBE_EVENT failed: %s.", strerror(errno));
return ret;
}
// Dequeues an event from the device. Only subscribed events can be dequeued.
// If there are no events, the return value is non-zero.
int dequeue_event(struct queue* queue, uint32_t* type) {
// v4l2_event structure will be completely filled out by the driver.
struct v4l2_event event;
int ret = ioctl(queue->v4lfd, VIDIOC_DQEVENT, &event);
if (!ret && type)
*type = event.type;
return ret;
}
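// Sets the crop rectangle of |queue| to |display_area| via VIDIOC_S_SELECTION.
// Used on the Image Processor's OUTPUT queue so that only the visible part of
// the decoded buffer is converted.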
bool apply_selection_to_queue(struct queue* queue,
struct v4l2_rect display_area) {
struct v4l2_selection selection = {
// |type| has a note in the header:"(do not use *_MPLANE types)".
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
.target = V4L2_SEL_TGT_CROP,
.flags = 0};
memcpy(&selection.r, &display_area, sizeof(struct v4l2_rect));
if (ioctl(queue->v4lfd, VIDIOC_S_SELECTION, &selection) != 0) {
LOG_ERROR("VIDIOC_S_SELECTION failed: %s.", strerror(errno));
return false;
}
assert(selection.r.left == 0);
assert(selection.r.top == 0);
LOG_INFO("Queue selection %dx%d", selection.r.width, selection.r.height);
return true;
}
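// Queries each of the |reqbuf->count| MMAP buffers with VIDIOC_QUERYBUF and
// mmap()s every plane into |queue->buffers|. Returns non-zero on failure.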
int request_mmap_buffers(struct queue* queue,
struct v4l2_requestbuffers* reqbuf) {
const int v4lfd = queue->v4lfd;
const uint32_t buffer_alloc = reqbuf->count * sizeof(struct mmap_buffers);
struct mmap_buffers* buffers = (struct mmap_buffers*)malloc(buffer_alloc);
assert(buffers);
memset(buffers, 0, buffer_alloc);
queue->buffers = buffers;
queue->cnt = reqbuf->count;
int ret = 0;
for (uint32_t i = 0; i < reqbuf->count; ++i) {
struct v4l2_buffer buffer;
struct v4l2_plane planes[VIDEO_MAX_PLANES];
memset(&buffer, 0, sizeof(buffer));
buffer.type = reqbuf->type;
buffer.memory = queue->memory;
buffer.index = i;
buffer.length = queue->num_planes;
buffer.m.planes = planes;
ret = ioctl(v4lfd, VIDIOC_QUERYBUF, &buffer);
if (ret != 0) {
LOG_ERROR("VIDIOC_QUERYBUF failed: %d.", ret);
break;
}
for (uint32_t j = 0; j < queue->num_planes; ++j) {
buffers[i].length[j] = buffer.m.planes[j].length;
buffers[i].start[j] =
mmap(NULL, buffer.m.planes[j].length, PROT_READ | PROT_WRITE,
MAP_SHARED, v4lfd, buffer.m.planes[j].m.mem_offset);
if (MAP_FAILED == buffers[i].start[j]) {
LOG_ERROR("Failed to mmap buffer of length(%d) and offset(0x%x).",
buffer.m.planes[j].length, buffer.m.planes[j].m.mem_offset);
ret = -1;
}
}
}
return ret;
}
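// Creates |count| GBM buffer objects matching |CAPTURE_queue|'s coded size,
// fourcc and |modifier|; these DmaBufs back the CAPTURE buffers when
// V4L2_MEMORY_DMABUF is used.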
int request_dmabuf_buffers(struct gbm_device* gbm,
struct queue* CAPTURE_queue,
uint64_t modifier, const int count) {
int ret = 0;
const uint32_t buffer_alloc = count * sizeof(struct mmap_buffers);
struct mmap_buffers* buffers = (struct mmap_buffers*)malloc(buffer_alloc);
assert(buffers);
memset(buffers, 0, buffer_alloc);
CAPTURE_queue->buffers = buffers;
CAPTURE_queue->cnt = count;
uint32_t gbm_format;
if (CAPTURE_queue->fourcc == V4L2_PIX_FMT_NV12 ||
CAPTURE_queue->fourcc == V4L2_PIX_FMT_MT21C) {
gbm_format = GBM_FORMAT_NV12;
} else if (CAPTURE_queue->fourcc == V4L2_PIX_FMT_P010) {
gbm_format = GBM_FORMAT_P010;
} else {
char fourcc_str[FOURCC_SIZE + 1];
LOG_ERROR("%s format not supported for the CAPTURE_queue",
fourcc_to_string(CAPTURE_queue->fourcc, fourcc_str));
return -1;
}
for (uint32_t i = 0; i < CAPTURE_queue->cnt; ++i) {
const uint32_t width = CAPTURE_queue->coded_width;
const uint32_t height = CAPTURE_queue->coded_height;
struct gbm_bo* bo = gbm_bo_create_with_modifiers(
gbm, width, height, gbm_format, &modifier, /*count=*/1);
assert(bo);
CAPTURE_queue->buffers[i].bo = bo;
}
return ret;
}
// NV12 to I420 conversion.
// This function converts the NV12 |buffer_in| into an I420 buffer stored in a
// struct data_buffer. Ownership of the returned structure's |.data| member is
// transferred to the caller, who must free it.
// |buffer_in| is padded, whereas the return buffer is tightly packed.
// Example: |display_area.left| = 0, |display_area.top| = 0,
// |display_area.width| = 8, |display_area.height| = 2, |buffer_width| = 10.
//
// NV12 I420
// YYYYYYYY00 YYYYYYYY
// YYYYYYYY00 YYYYYYYY
// UVUVUVUV00 UUUUVVVV
//
// HW pads 0s for |buffer_width - display_area.left - display_area.width|
// bytes after each row on Trogdor. Other platforms might leave the padding
// uninitialized, and on yet others accessing it might cause a crash of some
// sort (access violation).
struct data_buffer nv12_to_i420(struct v4l2_rect display_area,
uint32_t buffer_width,
uint32_t buffer_height,
uint8_t* buffer_in) {
const size_t y_plane_size = display_area.width * display_area.height;
// Both NV12 and I420 are 4:2:0, so the U and V planes are downsampled by a
// factor of two in width and height relative to the Y plane.
// When an image dimension is odd, round the downsampled dimension up.
const size_t u_plane_left = display_area.left / 2;
const size_t u_plane_top = display_area.top / 2;
const size_t u_plane_width = (display_area.width + 1) / 2;
const size_t u_plane_height = (display_area.height + 1) / 2;
const size_t u_plane_size = u_plane_width * u_plane_height;
struct data_buffer buffer_out = {.data = NULL,
.size = y_plane_size + 2 * u_plane_size};
buffer_out.data = malloc(sizeof(uint8_t) * buffer_out.size);
assert(buffer_out.data);
memset(buffer_out.data, 0, sizeof(uint8_t) * buffer_out.size);
// Copies luma data from |buffer_in| one row at a time
// to avoid touching the padding.
for (int row = 0; row < display_area.height; ++row) {
memcpy(buffer_out.data + row * display_area.width,
buffer_in + display_area.left +
(display_area.top + row) * buffer_width,
display_area.width);
}
uint8_t* u_plane_out = &buffer_out.data[y_plane_size];
uint8_t* v_plane_out = u_plane_out + u_plane_size;
const size_t uv_plane_offset = buffer_width * buffer_height;
for (int row = 0; row < u_plane_height; ++row) {
for (int column = 0; column < u_plane_width; ++column) {
*(u_plane_out + row * u_plane_width + column) =
buffer_in[uv_plane_offset + (u_plane_top + row) * buffer_width +
2 * (u_plane_left + column)];
*(v_plane_out + row * u_plane_width + column) =
buffer_in[uv_plane_offset + (u_plane_top + row) * buffer_width +
2 * (u_plane_left + column) + 1];
}
}
return buffer_out;
}
void compute_and_print_md5hash(const void* data,
size_t len,
uint32_t frame_index) {
struct md5_hash hash;
MD5_CTX ctx;
MD5_Init(&ctx);
MD5_Update(&ctx, data, len);
MD5_Final(hash.bytes, &ctx);
// This printout format is what the Tast PlatformDecoding test expects.
// Don't change it!
LOG_INFO("frame # %d - ", frame_index);
for (int n = 0; n < 16; ++n)
printf("%02x", hash.bytes[n]);
printf("\n");
}
// Handles cropping a frame to the displayable area. The returned struct
// data_buffer member |data| must be freed by the caller.
struct data_buffer map_buffers_and_convert_to_i420(struct queue* CAPTURE_queue,
uint32_t queue_index) {
assert(CAPTURE_queue->memory == V4L2_MEMORY_DMABUF ||
CAPTURE_queue->memory == V4L2_MEMORY_MMAP);
// TODO: only handles 8 bit pixels
assert(CAPTURE_queue->fourcc == V4L2_PIX_FMT_NV12);
// Maps the frame buffer into |buffer_nv12| and gets the buffer's stride and
// size metadata for cropping and deinterleaving the image.
uint32_t buffer_bytes_per_line = 0;
uint32_t buffer_height = 0;
size_t buffer_size = 0;
uint8_t* buffer_nv12 = NULL;
int bo_fd = 0;
if (CAPTURE_queue->memory == V4L2_MEMORY_DMABUF) {
struct gbm_bo* bo = CAPTURE_queue->buffers[queue_index].bo;
bo_fd = gbm_bo_get_fd(bo);
assert(bo_fd > 0);
buffer_size = lseek(bo_fd, 0, SEEK_END);
lseek(bo_fd, 0, SEEK_SET);
assert(gbm_bo_get_stride_for_plane(bo, 0) ==
gbm_bo_get_stride_for_plane(bo, 1));
buffer_bytes_per_line = gbm_bo_get_stride_for_plane(bo, 0);
buffer_height = gbm_bo_get_height(bo);
buffer_nv12 = mmap(0, buffer_size, PROT_READ, MAP_SHARED, bo_fd, 0);
} else {
assert(CAPTURE_queue->memory == V4L2_MEMORY_MMAP);
// TODO(nhebert) doesn't support multi-planar MMAP buffers
assert(CAPTURE_queue->num_planes == 1);
buffer_bytes_per_line = CAPTURE_queue->bytes_per_line[0];
buffer_height = CAPTURE_queue->coded_height;
buffer_size = CAPTURE_queue->buffers[queue_index].length[0];
buffer_nv12 = CAPTURE_queue->buffers[queue_index].start[0];
}
assert(buffer_bytes_per_line * buffer_height * 3 / 2 <= buffer_size);
// Libvpx golden md5 hashes are calculated in I420 format.
// Deinterleave |buffer_nv12| and apply the display area.
struct data_buffer buffer_i420 = nv12_to_i420(CAPTURE_queue->display_area,
buffer_bytes_per_line,
buffer_height,
buffer_nv12);
if (CAPTURE_queue->memory == V4L2_MEMORY_DMABUF) {
munmap(buffer_nv12, buffer_size);
close(bo_fd);
}
return buffer_i420;
}
// Maps |CAPTURE_queue|'s buffer at |queue_index| and calculates its MD5 sum.
void map_buffers_and_calculate_md5hash(struct queue* CAPTURE_queue,
uint32_t queue_index) {
struct data_buffer buffer_i420 =
map_buffers_and_convert_to_i420(CAPTURE_queue, queue_index);
assert(buffer_i420.data);
compute_and_print_md5hash(buffer_i420.data, buffer_i420.size,
CAPTURE_queue->displayed_frames);
free(buffer_i420.data);
}
// This is the input queue that will take compressed data.
// See 4.5.1.5. Initialization in the stateful decoder documentation.
int setup_OUTPUT(struct queue* OUTPUT_queue,
const uint32_t* optional_width,
const uint32_t* optional_height) {
int ret = 0;
// 1. Sets the coded format on OUTPUT via VIDIOC_S_FMT().
if (!ret) {
struct v4l2_format fmt;
memset(&fmt, 0, sizeof(fmt));
fmt.type = OUTPUT_queue->type;
if (optional_width)
fmt.fmt.pix_mp.width = *optional_width;
if (optional_height)
fmt.fmt.pix_mp.height = *optional_height;
fmt.fmt.pix_mp.pixelformat = OUTPUT_queue->fourcc;
if (OUTPUT_queue->num_planes == 1)
fmt.fmt.pix_mp.plane_fmt[0].sizeimage = kInputbufferMaxSize;
fmt.fmt.pix_mp.num_planes = OUTPUT_queue->num_planes;
ret = ioctl(OUTPUT_queue->v4lfd, VIDIOC_S_FMT, &fmt);
if (ret != 0)
LOG_ERROR("OUTPUT queue: VIDIOC_S_FMT failed: %s.", strerror(errno));
}
// 2. Allocates source (bytestream) buffers via VIDIOC_REQBUFS() on OUTPUT.
if (!ret) {
struct v4l2_requestbuffers reqbuf = {.count = kRequestBufferCount,
.type = OUTPUT_queue->type,
.memory = OUTPUT_queue->memory};
ret = ioctl(OUTPUT_queue->v4lfd, VIDIOC_REQBUFS, &reqbuf);
if (ret != 0)
LOG_ERROR("OUTPUT queue: VIDIOC_REQBUFS failed: %s.", strerror(errno));
char fourcc[FOURCC_SIZE + 1];
LOG_INFO("OUTPUT queue: %d buffers requested, %d buffers for compressed "
"data (%s) returned.", kRequestBufferCount, reqbuf.count,
fourcc_to_string(OUTPUT_queue->fourcc, fourcc));
if (OUTPUT_queue->memory == V4L2_MEMORY_MMAP)
ret = request_mmap_buffers(OUTPUT_queue, &reqbuf);
}
// 3. Starts streaming on the OUTPUT queue via VIDIOC_STREAMON().
if (!ret) {
ret = ioctl(OUTPUT_queue->v4lfd, VIDIOC_STREAMON, &OUTPUT_queue->type);
if (ret != 0)
LOG_ERROR("OUTPUT queue: VIDIOC_STREAMON failed: %s.", strerror(errno));
else
OUTPUT_queue->is_streaming = true;
}
return ret;
}
// Reads parsed compressed frame data from |file_buf| and submits it to
// |OUTPUT_queue|.
int submit_compressed_data(struct queue* OUTPUT_queue, uint32_t queue_index) {
struct mmap_buffers* buffers = OUTPUT_queue->buffers;
const size_t buf_size = fill_compressed_buffer(
buffers[queue_index].start[0], buffers[queue_index].length[0]);
if (!buf_size)
return 0;
struct v4l2_buffer v4l2_buffer;
struct v4l2_plane planes[VIDEO_MAX_PLANES];
memset(&v4l2_buffer, 0, sizeof(v4l2_buffer));
v4l2_buffer.index = queue_index;
v4l2_buffer.type = OUTPUT_queue->type;
v4l2_buffer.memory = OUTPUT_queue->memory;
v4l2_buffer.length = 1;
v4l2_buffer.m.planes = planes;
v4l2_buffer.m.planes[0].length = buffers[queue_index].length[0];
v4l2_buffer.m.planes[0].bytesused = buf_size;
v4l2_buffer.m.planes[0].data_offset = 0;
int ret = ioctl(OUTPUT_queue->v4lfd, VIDIOC_QBUF, &v4l2_buffer);
if (ret != 0) {
LOG_ERROR("OUTPUT queue: VIDIOC_QBUF failed: %s.", strerror(errno));
return -1;
}
return 0;
}
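// Fills every OUTPUT buffer with compressed data (or stops early at the end of
// the stream) so that the decoder can parse the stream headers before the
// CAPTURE queue is configured (see setup_CAPTURE).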
int prime_OUTPUT(struct queue* OUTPUT_queue) {
int ret = 0;
for (uint32_t i = 0; i < OUTPUT_queue->cnt; ++i) {
ret = submit_compressed_data(OUTPUT_queue, i);
if (ret || is_end_of_stream())
break;
}
return ret;
}
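// Unmaps any mmap()ed planes, destroys any GBM buffer objects and frees the
// per-buffer bookkeeping of |queue|.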
void cleanup_queue(struct queue* queue) {
if (queue->cnt) {
struct mmap_buffers* buffers = queue->buffers;
for (uint32_t i = 0; i < queue->cnt; ++i) {
for (uint32_t j = 0; j < queue->num_planes; ++j) {
if (buffers[i].length[j])
munmap(buffers[i].start[j], buffers[i].length[j]);
}
if (buffers[i].bo)
gbm_bo_destroy(buffers[i].bo);
}
free(queue->buffers);
queue->cnt = 0;
}
}
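// Enqueues the CAPTURE buffer at |index| with VIDIOC_QBUF. For DMABUF memory
// the plane file descriptors are exported from the buffer's GBM BO and closed
// again once the buffer has been queued.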
int queue_buffer_CAPTURE(struct queue* queue, uint32_t index) {
struct v4l2_buffer v4l2_buffer;
struct v4l2_plane planes[VIDEO_MAX_PLANES];
memset(&v4l2_buffer, 0, sizeof v4l2_buffer);
memset(&planes, 0, sizeof planes);
v4l2_buffer.type = queue->type;
v4l2_buffer.memory = queue->memory;
v4l2_buffer.index = index;
v4l2_buffer.m.planes = planes;
v4l2_buffer.length = queue->num_planes;
struct gbm_bo* bo = queue->buffers[index].bo;
for (uint32_t i = 0; i < queue->num_planes; ++i) {
if (queue->memory == V4L2_MEMORY_DMABUF) {
v4l2_buffer.m.planes[i].m.fd = gbm_bo_get_fd_for_plane(bo, i);
assert(v4l2_buffer.m.planes[i].m.fd >= 0);
} else if (queue->memory == V4L2_MEMORY_MMAP) {
struct mmap_buffers* buffers = queue->buffers;
v4l2_buffer.m.planes[i].length = buffers[index].length[i];
v4l2_buffer.m.planes[i].bytesused = buffers[index].length[i];
v4l2_buffer.m.planes[i].data_offset = 0;
}
}
int ret = ioctl(queue->v4lfd, VIDIOC_QBUF, &v4l2_buffer);
if (ret != 0)
LOG_ERROR("CAPTURE queue: VIDIOC_QBUF failed: %s.", strerror(errno));
if (queue->memory == V4L2_MEMORY_DMABUF) {
for (uint32_t i = 0; i < queue->num_planes; ++i) {
int ret_close = close(v4l2_buffer.m.planes[i].m.fd);
if (ret_close != 0) {
LOG_ERROR("close failed with v4l2_buffer.m.planes[%d].m.fd: %s.", i,
strerror(errno));
ret = ret_close;
}
}
}
return ret;
}
// This is the output queue that will produce uncompressed frames, see 4.5.1.6
// Capture Setup
// https://www.kernel.org/doc/html/v5.4/media/uapi/v4l/dev-decoder.html#capture-setup
int setup_CAPTURE(struct gbm_device* gbm,
struct queue* CAPTURE_queue,
uint64_t modifier,
uint32_t coded_frame_rate,
bool use_CAPTURE_queue_dimensions) {
int ret = 0;
// In stateful decoders, we insert some encoded chunks in the OUTPUT queue and
// then query (via VIDIOC_G_FMT) the associated CAPTURE queue to learn e.g.
// the decoded dimensions; this follows the process detailed in [1,2]. For the
// Image Processor (MDP) case, we don't want/need to queue frames first,
// because we know the dimensions from the decoding proper. So we simply set
// the format in the |CAPTURE_queue| first.
// [1] https://www.kernel.org/doc/html/v5.4/media/uapi/v4l/dev-decoder.html#initialization
// [2] https://www.kernel.org/doc/html/v5.4/media/uapi/v4l/dev-decoder.html#capture-setup
if (use_CAPTURE_queue_dimensions) {
struct v4l2_format fmt;
memset(&fmt, 0, sizeof(fmt));
fmt.type = CAPTURE_queue->type;
fmt.fmt.pix_mp.pixelformat = CAPTURE_queue->fourcc;
fmt.fmt.pix_mp.width = CAPTURE_queue->display_area.width;
fmt.fmt.pix_mp.height = CAPTURE_queue->display_area.height;
ret = ioctl(CAPTURE_queue->v4lfd, VIDIOC_S_FMT, &fmt);
if (ret != 0)
LOG_ERROR("CAPTURE queue: VIDIOC_S_FMT failed: %s.", strerror(errno));
}
// 1. "Call VIDIOC_G_FMT() on the CAPTURE queue to get format for the
// destination buffers parsed/decoded from the bytestream."
if (!ret) {
struct v4l2_format fmt;
memset(&fmt, 0, sizeof(fmt));
fmt.type = CAPTURE_queue->type;
ret = ioctl(CAPTURE_queue->v4lfd, VIDIOC_G_FMT, &fmt);
if (ret != 0)
LOG_ERROR("CAPTURE queue: VIDIOC_G_FMT failed: %s.", strerror(errno));
// |CAPTURE_queue| might be configured for Q128 (V4L2_PIX_FMT_NV12_UBWC); in
// that case we simply ignore it and force |CAPTURE_queue->fourcc| further
// below.
assert(CAPTURE_queue->fourcc == fmt.fmt.pix_mp.pixelformat ||
fmt.fmt.pix_mp.pixelformat == V4L2_PIX_FMT_NV12_UBWC);
// Oftentimes the driver will give us back the appropriately aligned sizes.
// Pick them up here.
if (CAPTURE_queue->coded_width != fmt.fmt.pix_mp.width ||
CAPTURE_queue->coded_height != fmt.fmt.pix_mp.height) {
LOG_DEBUG("CAPTURE queue provided adjusted coded dimensions: %dx%d --> "
"%dx%d", CAPTURE_queue->coded_width, CAPTURE_queue->coded_height,
fmt.fmt.pix_mp.width, fmt.fmt.pix_mp.height);
}
CAPTURE_queue->coded_width = fmt.fmt.pix_mp.width;
CAPTURE_queue->coded_height = fmt.fmt.pix_mp.height;
if (CAPTURE_queue->num_planes != fmt.fmt.pix_mp.num_planes) {
LOG_DEBUG("CAPTURE queue provided adjusted num planes: %d --> %d",
CAPTURE_queue->num_planes, fmt.fmt.pix_mp.num_planes);
}
CAPTURE_queue->num_planes = fmt.fmt.pix_mp.num_planes;
for (int i = 0; i < CAPTURE_queue->num_planes; ++i) {
CAPTURE_queue->bytes_per_line[i] =
fmt.fmt.pix_mp.plane_fmt[i].bytesperline;
}
char fourcc[FOURCC_SIZE + 1];
LOG_INFO("CAPTURE queue: width=%d, height=%d, format=%s, num_planes=%d",
CAPTURE_queue->coded_width, CAPTURE_queue->coded_height,
fourcc_to_string(CAPTURE_queue->fourcc, fourcc),
CAPTURE_queue->num_planes);
}
// 2. Optional. Acquire the visible resolution via VIDIOC_G_SELECTION.
if (!ret) {
struct v4l2_selection selection;
memset(&selection, 0, sizeof(selection));
selection.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
selection.target = V4L2_SEL_TGT_COMPOSE;
ret = ioctl(CAPTURE_queue->v4lfd, VIDIOC_G_SELECTION, &selection);
if (ret != 0)
LOG_ERROR("VIDIOC_G_SELECTION failed: %s.", strerror(errno));
if (CAPTURE_queue->display_area.left != selection.r.left ||
CAPTURE_queue->display_area.top != selection.r.top ||
CAPTURE_queue->display_area.width != selection.r.width ||
CAPTURE_queue->display_area.height != selection.r.height) {
LOG_DEBUG("CAPTURE queue visible area changed from %dx%d+%d+%d to "
"%dx%d+%d+%d",
CAPTURE_queue->display_area.width, CAPTURE_queue->display_area.height,
CAPTURE_queue->display_area.left, CAPTURE_queue->display_area.top,
selection.r.width, selection.r.height,
selection.r.left, selection.r.top);
}
CAPTURE_queue->display_area = selection.r;
LOG_INFO("DisplayLeft=%d, DisplayTop=%d, DisplayWidth=%d, DisplayHeight=%d",
selection.r.left, selection.r.top,
selection.r.width, selection.r.height);
}
// 4. "Optional. Set the CAPTURE format via VIDIOC_S_FMT() on the CAPTURE
// queue. The client may choose a different format than selected/suggested by
// the decoder in VIDIOC_G_FMT()."
if (!ret) {
struct v4l2_format fmt;
memset(&fmt, 0, sizeof(fmt));
fmt.type = CAPTURE_queue->type;
fmt.fmt.pix_mp.pixelformat = CAPTURE_queue->fourcc;
fmt.fmt.pix_mp.width = CAPTURE_queue->coded_width;
fmt.fmt.pix_mp.height = CAPTURE_queue->coded_height;
ret = ioctl(CAPTURE_queue->v4lfd, VIDIOC_S_FMT, &fmt);
if (ret != 0)
LOG_ERROR("CAPTURE queue: VIDIOC_S_FMT failed: %s.", strerror(errno));
}
// 5. (Optional) Sets the coded frame interval on the CAPTURE queue via
// VIDIOC_S_PARM. Support for this feature is signaled by the
// V4L2_FMT_FLAG_ENC_CAP_FRAME_INTERVAL format flag.
if (!ret && coded_frame_rate != kInvalidFrameRate) {
struct v4l2_streamparm parms;
memset(&parms, 0, sizeof(parms));
parms.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
// TODO(b/202758590): Handle non-integral coded_frame_rate
parms.parm.output.timeperframe.numerator = 1;
parms.parm.output.timeperframe.denominator = coded_frame_rate;
LOG_INFO("Time per frame set to %d/%d seconds",
parms.parm.output.timeperframe.numerator,
parms.parm.output.timeperframe.denominator);
ret = ioctl(CAPTURE_queue->v4lfd, VIDIOC_S_PARM, &parms);
if (ret != 0)
LOG_ERROR("CAPTURE queue: VIDIOC_S_PARM failed: %s.", strerror(errno));
}
// 10. Allocates CAPTURE buffers via VIDIOC_REQBUFS() on the CAPTURE queue.
if (!ret) {
struct v4l2_requestbuffers reqbuf;
memset(&reqbuf, 0, sizeof(reqbuf));
reqbuf.count = kRequestBufferCount;
reqbuf.type = CAPTURE_queue->type;
reqbuf.memory = CAPTURE_queue->memory;
ret = ioctl(CAPTURE_queue->v4lfd, VIDIOC_REQBUFS, &reqbuf);
if (ret != 0)
LOG_ERROR("CAPTURE queue: VIDIOC_REQBUFS failed: %s.", strerror(errno));
char fourcc[FOURCC_SIZE + 1];
LOG_INFO("CAPTURE queue: %d buffers requested, %d buffers for decoded data "
"(%s) returned.", kRequestBufferCount, reqbuf.count,
fourcc_to_string(CAPTURE_queue->fourcc, fourcc));
if (CAPTURE_queue->memory == V4L2_MEMORY_DMABUF)
ret = request_dmabuf_buffers(gbm, CAPTURE_queue, modifier, reqbuf.count);
else if (CAPTURE_queue->memory == V4L2_MEMORY_MMAP)
ret = request_mmap_buffers(CAPTURE_queue, &reqbuf);
else
ret = -1;
if (!ret) {
for (uint32_t i = 0; i < reqbuf.count; ++i) {
ret = queue_buffer_CAPTURE(CAPTURE_queue, i);
if (ret != 0)
break;
}
}
}
// 11. Calls VIDIOC_STREAMON() on the CAPTURE queue to start decoding frames.
if (!ret) {
ret = ioctl(CAPTURE_queue->v4lfd, VIDIOC_STREAMON, &CAPTURE_queue->type);
if (ret != 0)
LOG_ERROR("VIDIOC_STREAMON failed: %s.", strerror(errno));
else
CAPTURE_queue->is_streaming = true;
}
return ret;
}
// Writes a single frame to disk. The file path is based on the input file name
// and the frame characteristics. For example:
// Input file: /path/file.ivf
// Frame number: 12
// Image width: 800
// Image height: 450
// This results in a file written to: /path/file_frame_0012_800x450_I420.yuv
void write_frame_to_disk(const char* output_file_prefix,
struct queue* CAPTURE_queue,
uint32_t queue_index) {
// TODO(b/204566257) doesn't support writing to a single file
#define FILE_NAME_FORMAT "%s_frame_%04d_%dx%d_I420.yuv"
// Update FILE_NAME_FORMAT if this assert is triggered.
assert(CAPTURE_queue->displayed_frames <= 9999);
const int sz = snprintf(NULL, 0, FILE_NAME_FORMAT, output_file_prefix,
CAPTURE_queue->displayed_frames,
CAPTURE_queue->display_area.width,
CAPTURE_queue->display_area.height);
char file_name[sz + 1]; // note +1 for terminating null byte
snprintf(file_name, sizeof file_name, FILE_NAME_FORMAT, output_file_prefix,
CAPTURE_queue->displayed_frames, CAPTURE_queue->display_area.width,
CAPTURE_queue->display_area.height);
#undef FILE_NAME_FORMAT
FILE* fp = fopen(file_name, "wb");
if (!fp) {
LOG_ERROR("Unable to open output yuv file: %s.", file_name);
return;
}
if (CAPTURE_queue->fourcc == V4L2_PIX_FMT_YUV420M) {
// Technically this path could also support V4L2_PIX_FMT_YVU420 and other
// I420 variations, but it's not properly tested/commonly utilized.
const size_t num_coded_pixels = CAPTURE_queue->coded_width *
CAPTURE_queue->coded_height;
uint8_t* y_buffer = CAPTURE_queue->buffers[queue_index].start[0];
fwrite(y_buffer, num_coded_pixels, 1, fp);
for (uint32_t i = 1; i < CAPTURE_queue->num_planes; ++i) {
const size_t uv_plane_size = ((CAPTURE_queue->coded_width + 1) / 2) *
((CAPTURE_queue->coded_height + 1) / 2);
uint8_t* chroma_buffer = CAPTURE_queue->buffers[queue_index].start[i];
fwrite(chroma_buffer, uv_plane_size, 1, fp);
}
} else {
// TODO(b/204566257) doesn't support writing full decoded buffers
struct data_buffer buffer_i420 =
map_buffers_and_convert_to_i420(CAPTURE_queue, queue_index);
fwrite(buffer_i420.data, buffer_i420.size, 1, fp);
}
fclose(fp);
}
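// Dequeues a buffer from |queue| via VIDIOC_DQBUF and, when the corresponding
// pointers are non-NULL, reports its |index|, plane 0 |bytesused| and |flags|.
// Returns non-zero on failure; errno is EAGAIN when no buffer is ready.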
int dequeue_buffer(struct queue* queue,
uint32_t* index,
uint32_t* bytesused,
uint32_t* flags) {
struct v4l2_buffer v4l2_buffer;
struct v4l2_plane planes[VIDEO_MAX_PLANES] = {0};
memset(&v4l2_buffer, 0, sizeof(v4l2_buffer));
v4l2_buffer.type = queue->type;
v4l2_buffer.memory = queue->memory;
// "Applications call the VIDIOC_DQBUF ioctl to dequeue [...]. They just
// set the type, memory and reserved fields of a struct v4l2_buffer as
// above, when VIDIOC_DQBUF is called with a pointer to this structure the
// driver fills the remaining fields or returns an error code."
// https://www.kernel.org/doc/html/v4.19/media/uapi/v4l/vidioc-qbuf.html
// MediaTek 8173, however, needs the |length| field filled in.
v4l2_buffer.length = queue->num_planes;
v4l2_buffer.m.planes = planes;
v4l2_buffer.m.planes[0].bytesused = 0;
int ret = ioctl(queue->v4lfd, VIDIOC_DQBUF, &v4l2_buffer);
if (index)
*index = v4l2_buffer.index;
if (bytesused)
*bytesused = v4l2_buffer.m.planes[0].bytesused;
if (flags)
*flags = v4l2_buffer.flags;
return ret;
}
// 4.5.1.10. Drain
// https://www.kernel.org/doc/html/v5.4/media/uapi/v4l/dev-decoder.html#drain
// initiate_drain should be called when the source ends or when the requested
// number of frames has been met.
int initiate_drain(struct queue* OUTPUT_queue) {
// 1. Begin the drain sequence by issuing VIDIOC_DECODER_CMD().
struct v4l2_decoder_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = V4L2_DEC_CMD_STOP;
// V4L2_DEC_CMD_STOP may not be supported by every driver, but we haven't run
// into one that rejects it.
int ret = ioctl(OUTPUT_queue->v4lfd, VIDIOC_DECODER_CMD, &cmd);
if (!ret)
OUTPUT_queue->is_streaming = false;
// 2. The decode loop needs to proceed normally as long as the CAPTURE
// queue has buffers that can be dequeued.
// This step occurs in the main |decode| loop.
return ret;
}
int finish_drain(struct queue* OUTPUT_queue, struct queue* CAPTURE_queue) {
// Steps 1 and 2 in the drain sequence have already occurred when this is
// called.
int ret = 0;
// 3. Reset by issuing VIDIOC_STREAMOFF
int ret_streamoff =
ioctl(OUTPUT_queue->v4lfd, VIDIOC_STREAMOFF, &OUTPUT_queue->type);
if (ret_streamoff != 0) {
LOG_ERROR("VIDIOC_STREAMOFF failed on OUTPUT: %s", strerror(errno));
ret = ret_streamoff;
}
ret_streamoff =
ioctl(CAPTURE_queue->v4lfd, VIDIOC_STREAMOFF, &CAPTURE_queue->type);
if (ret_streamoff != 0) {
LOG_ERROR("VIDIOC_STREAMOFF failed on CAPTURE: %s", strerror(errno));
ret = ret_streamoff;
}
return ret;
}
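// Signature of the hook used by decode() to post-process opaque CAPTURE frames
// with the Image Processor; see process_opaque_frame() below.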
typedef bool (*process_opaque_frame_function)(struct queue*,
uint32_t,
struct queue*,
struct queue*,
int*);
// This function gets a single opaque frame from |CAPTURE_queue| and enqueues it
// in the |mdp_OUTPUT_queue| to be processed by the MDP/Image Processor; it then
// waits for it to produce a single "clear" frame in |mdp_CAPTURE_queue|.
bool process_opaque_frame(struct queue* CAPTURE_queue,
uint32_t CAPTURE_queue_index,
struct queue* mdp_OUTPUT_queue,
struct queue* mdp_CAPTURE_queue,
int* mdp_CAPTURE_queue_dequeued_buffer_index) {
assert(CAPTURE_queue->memory == V4L2_MEMORY_MMAP);
assert(CAPTURE_queue->fourcc == mdp_OUTPUT_queue->fourcc);
assert(mdp_OUTPUT_queue->memory == V4L2_MEMORY_DMABUF);
// Insert the opaque buffer from the |CAPTURE_queue| into the Image Processor
// via the |mdp_OUTPUT_queue|. |CAPTURE_queue| and |mdp_OUTPUT_queue| are
// supposed to be of type V4L2_MEMORY_MMAP and V4L2_MEMORY_DMABUF,
// respectively. To avoid a copy, we extract the DmaBufs under |CAPTURE_queue|
// using VIDIOC_EXPBUF and "import" them into |mdp_OUTPUT_queue|.
{
struct v4l2_buffer v4l2_buffer;
memset(&v4l2_buffer, 0, sizeof(v4l2_buffer));
// Re-use the CAPTURE_queue index. This is for simplicity in this file:
// every time we dequeue an opaque frame from |CAPTURE_queue| we enqueue it
// in |mdp_OUTPUT_queue|.
v4l2_buffer.index = CAPTURE_queue_index;
v4l2_buffer.type = mdp_OUTPUT_queue->type;
v4l2_buffer.bytesused = 0; // "unused (set to 0) for multiplanar buffers"
v4l2_buffer.flags = V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_TIMESTAMP_COPY;
v4l2_buffer.memory = mdp_OUTPUT_queue->memory;
// |length|: "number of elements in the planes array for multi-plane buffers".
v4l2_buffer.length = mdp_OUTPUT_queue->num_planes;
// Note: although later specs mandated that |v4l2_buffer.timestamp| had to
// be filled in, it doesn't seem necessary in older kernels.
struct v4l2_plane planes[VIDEO_MAX_PLANES] = { 0 };
v4l2_buffer.m.planes = planes;
for (int i = 0; i < mdp_OUTPUT_queue->num_planes; ++i) {
// Export the DmaBuf out of the |CAPTURE_queue| |queue_index| and use it
// for |v4l2_buffer|.
struct v4l2_exportbuffer expbuf;
memset(&expbuf, 0, sizeof(expbuf));
expbuf.type = CAPTURE_queue->type;
expbuf.index = CAPTURE_queue_index;
expbuf.plane = i;
const int ret = ioctl(CAPTURE_queue->v4lfd, VIDIOC_EXPBUF, &expbuf);
if (ret != 0) {
LOG_ERROR("Image Processor: mdp_OUTPUT_queue VIDIOC_EXPBUF failed: %s.",
strerror(errno));
return false;
}
v4l2_buffer.m.planes[i].m.fd = expbuf.fd;
v4l2_buffer.m.planes[i].length =
CAPTURE_queue->buffers[CAPTURE_queue_index].length[i];
v4l2_buffer.m.planes[i].bytesused =
CAPTURE_queue->buffers[CAPTURE_queue_index].length[i];
assert(v4l2_buffer.m.planes[i].data_offset == 0);
}
const int ret = ioctl(mdp_OUTPUT_queue->v4lfd, VIDIOC_QBUF, &v4l2_buffer);
// VIDIOC_EXPBUF opens file descriptors that need to be closed.
for (int i = 0; i < mdp_OUTPUT_queue->num_planes; ++i) {
close(v4l2_buffer.m.planes[i].m.fd);
}
if (ret != 0) {
LOG_ERROR("Image Processor: mdp_OUTPUT_queue VIDIOC_QBUF failed: %s.",
strerror(errno));
return false;
} else {
LOG_DEBUG("Image Processor: enqueued frame in mdp_OUTPUT_queue with index"
" %d.", CAPTURE_queue_index);
}
}
// Try to dequeue a "clear" buffer from |mdp_CAPTURE_queue|. We try as long as
// VIDIOC_DQBUF returns EAGAIN, indicating that the device is busy (hopefully
// converting our opaque frames). If a buffer is dequeued, we reenqueue it
// immediately (note that it won't be reused until a couple of calls later
// since the indexes are used in round-robin fashion).
{
struct v4l2_buffer v4l2_buffer;
memset(&v4l2_buffer, 0, sizeof(v4l2_buffer));
v4l2_buffer.type = mdp_CAPTURE_queue->type;
v4l2_buffer.memory = mdp_CAPTURE_queue->memory;
struct v4l2_plane planes[VIDEO_MAX_PLANES] = { 0 };
v4l2_buffer.m.planes = planes;
v4l2_buffer.length = mdp_CAPTURE_queue->num_planes;
int num_tries = kMaxRetryCount;
bool keep_trying = false;
do {
const int ret =
ioctl(mdp_CAPTURE_queue->v4lfd, VIDIOC_DQBUF, &v4l2_buffer);
if (ret != 0 && errno != EAGAIN) {
LOG_ERROR("Image Processor: mdp_CAPTURE_queue VIDIOC_DQBUF failed.");
return false;
}
keep_trying = (ret != 0 && errno == EAGAIN);
num_tries--;
} while (keep_trying && num_tries != 0);
if (num_tries == 0) {
LOG_ERROR("Image Processor: timed out waiting for a clear frame.");
return false;
}
LOG_DEBUG("Image Processor: dequeued frame from mdp_CAPTURE_queue with "
"index %d.", v4l2_buffer.index);
*mdp_CAPTURE_queue_dequeued_buffer_index = v4l2_buffer.index;
const int ret =
ioctl(mdp_CAPTURE_queue->v4lfd, VIDIOC_QBUF, &v4l2_buffer);
if (ret != 0) {
LOG_ERROR("Image Processor: mdp_CAPTURE_queue VIDIOC_QBUF failed: %s.",
strerror(errno));
return false;
} else {
LOG_DEBUG("Image Processor: (re)enqueued frame in mdp_CAPTURE_queue "
"with index %d.", v4l2_buffer.index);
}
}
// If we're here, we have dequeued a "clear" buffer from |mdp_CAPTURE_queue|;
// dequeue a buffer from |mdp_OUTPUT_queue|: the MDP has finished processing
// one and we can reuse that slot in future function calls.
{
uint32_t index = 0;
const int ret = dequeue_buffer(mdp_OUTPUT_queue, &index, /*bytesused=*/NULL,
/*flags=*/NULL);
if (ret != 0) {
LOG_ERROR("Image Processor: mdp_OUTPUT_queue VIDIOC_DQBUF failed: %s.",
strerror(errno));
return false;
} else {
LOG_DEBUG("Image Processor: (re)dequeued frame in mdp_OUTPUT_queue with "
"index %d.", index);
}
}
return true;
}
// From 4.5.1.9. Dynamic Resolution Change
// This is called after a source change event is caught, dequeued,
// and all pending buffers in the |CAPTURE_queue| are drained.
int handle_dynamic_resolution_change(struct gbm_device* gbm,
struct queue* CAPTURE_queue,
uint64_t modifier) {
// Calls VIDIOC_STREAMOFF to stop the CAPTURE queue stream.
int ret = ioctl(CAPTURE_queue->v4lfd, VIDIOC_STREAMOFF, &CAPTURE_queue->type);
if (ret != 0)
LOG_ERROR("VIDIOC_STREAMOFF failed: %s.", strerror(errno));
// Deallocates |CAPTURE_queue| buffers via VIDIOC_REQBUFS. Calling
// VIDIOC_REQBUFS with |count == 0| tells the device to deallocate the
// existing buffers.
if (!ret) {
cleanup_queue(CAPTURE_queue);
struct v4l2_requestbuffers reqbuf = {.count = 0,
.type = CAPTURE_queue->type,
.memory = CAPTURE_queue->memory};
ret = ioctl(CAPTURE_queue->v4lfd, VIDIOC_REQBUFS, &reqbuf);
if (ret != 0) {
LOG_ERROR("CAPTURE queue: VIDIOC_REQBUFS failed while deallocating "
"buffers: %s.", strerror(errno));
}
}
// setup_CAPTURE will call VIDIOC_STREAMON as part of the capture setup
// sequence, which starts capture queue streaming.
if (!ret) {
ret = setup_CAPTURE(gbm, CAPTURE_queue, modifier, kInvalidFrameRate,
/*use_CAPTURE_queue_dimensions=*/false);
}
return ret;
}
int decode(struct gbm_device* gbm,
struct queue* CAPTURE_queue,
struct queue* OUTPUT_queue,
uint64_t modifier,
const char* output_file_prefix,
uint32_t frames_to_decode,
bool print_md5hash,
process_opaque_frame_function process_opaque_frame_fp,
struct queue* mdp_OUTPUT_queue,
struct queue* mdp_CAPTURE_queue) {
int ret = 0;
// If no buffers have been dequeued for more than |kMaxRetryCount| retries, we
// should exit the program. Something is wrong in the decoder or with how we
// are controlling it.
int num_tries = kMaxRetryCount;
// TODO(nhebert) See if it is possible for two events to be dequeued before
// the first is handled. Assume "no" for now.
bool pending_dynamic_resolution_change = false;
while (ret == 0 && CAPTURE_queue->is_streaming && num_tries != 0) {
{
uint32_t event_type = 0;
if (!dequeue_event(CAPTURE_queue, &event_type) &&
event_type == V4L2_EVENT_SOURCE_CHANGE) {
LOG_INFO("CAPTURE queue experienced a source change.");
pending_dynamic_resolution_change = true;
}
uint32_t index = 0;
uint32_t flags = 0;
uint32_t bytesused = 0;
const int ret_dequeue = dequeue_buffer(CAPTURE_queue, &index, &bytesused,
&flags);
if (ret_dequeue != 0) {
if (errno != EAGAIN) {
LOG_ERROR("VIDIOC_DQBUF failed for CAPTURE queue: %s.",
strerror(errno));
ret = ret_dequeue;
break;
}
num_tries--;
} else {
// Successfully dequeued a buffer. Reset the |num_tries| counter.
num_tries = kMaxRetryCount;
const bool is_flag_error_set = (flags & V4L2_BUF_FLAG_ERROR) != 0;
const bool is_flag_last_set = (flags & V4L2_BUF_FLAG_LAST) != 0;
if (is_flag_last_set)
CAPTURE_queue->is_streaming = false;
// Don't use a buffer flagged with V4L2_BUF_FLAG_ERROR regardless of
// |bytesused|, or one flagged with V4L2_BUF_FLAG_LAST and |bytesused| == 0.
const bool ignore_buffer = is_flag_error_set ||
(is_flag_last_set && (bytesused == 0));
if (!ignore_buffer &&
CAPTURE_queue->displayed_frames < frames_to_decode) {
CAPTURE_queue->displayed_frames++;
if (process_opaque_frame_fp) {
int mdp_CAPTURE_queue_buffer_index = kInvalidBufferIndex;
assert(V4L2_MEMORY_MMAP == CAPTURE_queue->memory);
if (!process_opaque_frame_fp(CAPTURE_queue,
index,
mdp_OUTPUT_queue,
mdp_CAPTURE_queue,
&mdp_CAPTURE_queue_buffer_index)) {
LOG_ERROR("Image Processor failed.");
return -1;
}
mdp_CAPTURE_queue->displayed_frames++;
if (output_file_prefix &&
mdp_CAPTURE_queue_buffer_index != kInvalidBufferIndex) {
write_frame_to_disk(output_file_prefix, mdp_CAPTURE_queue,
mdp_CAPTURE_queue_buffer_index);
}
if (print_md5hash &&
mdp_CAPTURE_queue_buffer_index != kInvalidBufferIndex) {
compute_and_print_md5hash(
mdp_CAPTURE_queue->buffers[mdp_CAPTURE_queue_buffer_index]
.start[0],
mdp_CAPTURE_queue->buffers[mdp_CAPTURE_queue_buffer_index]
.length[0],
CAPTURE_queue->displayed_frames);
}
} else {
if (output_file_prefix)
write_frame_to_disk(output_file_prefix, CAPTURE_queue, index);
if (print_md5hash)
map_buffers_and_calculate_md5hash(CAPTURE_queue, index);
}
} else {
LOG_DEBUG("Buffer set %s%s%s.",
is_flag_error_set ? "V4L2_BUF_FLAG_ERROR" : "",
(is_flag_error_set && is_flag_last_set) ? " and " : "",
is_flag_last_set ? "V4L2_BUF_FLAG_LAST" : "");
}
// When the device has decoded |frames_to_decode| displayable frames,
// start the drain sequence.
if (!ret && OUTPUT_queue->is_streaming &&
CAPTURE_queue->displayed_frames >= frames_to_decode) {
ret = initiate_drain(OUTPUT_queue);
}
// Done with buffer, queue it back up unless we got V4L2_BUF_FLAG_LAST
if (!ret && !is_flag_last_set)
ret = queue_buffer_CAPTURE(CAPTURE_queue, index);
if (!ret && pending_dynamic_resolution_change && is_flag_last_set) {
LOG_DEBUG("Handling dynamic resolution change.");
pending_dynamic_resolution_change = false;
ret = handle_dynamic_resolution_change(gbm, CAPTURE_queue, modifier);
}
}
}
// Check the OUTPUT queue for free buffers and fill accordingly.
if (!ret) {
uint32_t index = 0;
const int ret_dequeue = dequeue_buffer(OUTPUT_queue, &index, NULL,
NULL);
if (ret_dequeue != 0) {
if (errno != EAGAIN) {
LOG_ERROR("VIDIOC_DQBUF failed for OUTPUT queue: %s.",
strerror(errno));
ret = ret_dequeue;
break;
}
continue;
}
if (OUTPUT_queue->is_streaming) {
ret = submit_compressed_data(OUTPUT_queue, index);
// If there are no remaining frames, we should stop the OUTPUT queue.
// Doing so lets the decoder know it should process the remaining
// buffers in the OUTPUT queue and then dequeue a buffer with
// V4L2_BUF_FLAG_LAST set. After that happens, the decode loop will
// stop.
if (!ret && is_end_of_stream())
ret = initiate_drain(OUTPUT_queue);
}
}
}
if (num_tries == 0) {
LOG_FATAL("Decoder appeared to stall after decoding %d frames.",
CAPTURE_queue->displayed_frames);
}
finish_drain(OUTPUT_queue, CAPTURE_queue);
LOG_INFO("%d displayable frames decoded.", CAPTURE_queue->displayed_frames);
return ret;
}
static void print_help(const char* argv0) {
printf("usage: %s [OPTIONS]\n", argv0);
printf(" -f, --file ivf file to decode\n");
printf(" -w, --write write visible frames in I420 format\n");
printf(" -m, --max max number of visible frames to decode\n");
printf(" -a, --mmap use mmap instead of dmabuf\n");
printf(" -c, --capture_fmt fourcc of the CAPTURE queue, i.e. the "
"decoded video format\n");
printf(" -l, --log_level specifies log level, 0:debug 1:info 2:error "
"3:fatal (default: %d)\n", DEFAULT_LOG_LEVEL);
printf(" -r, --frame_rate (optional) specify a frame rate (Hz)\n");
printf(" -d, --md5 compute md5 hash for each decoded visible frame "
"in I420 format\n");
}
static const struct option longopts[] = {
{"file", required_argument, NULL, 'f'},
{"write", no_argument, NULL, 'w'},
{"max", required_argument, NULL, 'm'},
{"mmap", no_argument, NULL, 'a'},
{"capture_fmt", required_argument, NULL, 'c'},
{"log_level", required_argument, NULL, 'l'},
{"md5", no_argument, NULL, 'd'},
{"frame_rate", required_argument, NULL, 'r'},
{0, 0, 0, 0},
};
int main(int argc, char* argv[]) {
int c;
char* file_name = NULL;
bool write_out = false;
bool print_md5hash = false;
uint32_t frames_to_decode = UINT_MAX;
uint32_t frame_rate = kInvalidFrameRate;
uint64_t modifier = DRM_FORMAT_MOD_LINEAR;
uint32_t uncompressed_fourcc = V4L2_PIX_FMT_INVALID;
enum v4l2_memory CAPTURE_memory = V4L2_MEMORY_DMABUF;
while ((c = getopt_long(argc, argv, "f:m:wac:l:dr:", longopts, NULL)) != -1) {
switch (c) {
case 'f':
file_name = strdup(optarg);
break;
case 'm':
frames_to_decode = atoi(optarg);
break;
case 'w':
write_out = true;
break;
case 'a':
CAPTURE_memory = V4L2_MEMORY_MMAP;
break;
case 'c':
if (strlen(optarg) == 4) {
uncompressed_fourcc =
v4l2_fourcc(toupper(optarg[0]), toupper(optarg[1]),
toupper(optarg[2]), toupper(optarg[3]));
char fourcc[FOURCC_SIZE + 1];
LOG_INFO("User-provided CAPTURE format: %s",
fourcc_to_string(uncompressed_fourcc, fourcc));
if (uncompressed_fourcc == V4L2_PIX_FMT_NV12_UBWC) {
modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED;
LOG_INFO("CAPTURE format is compressed, setting modifier.");
}
}
break;
case 'l': {
const int specified_log_run_level = atoi(optarg);
if (specified_log_run_level >= kLoggingLevelMax) {
LOG_INFO("Undefined log level %d, using default log level instead.",
specified_log_run_level);
} else {
log_run_level = specified_log_run_level;
}
break;
}
case 'd':
print_md5hash = true;
break;
case 'r':
frame_rate = atoi(optarg);
if (frame_rate == kInvalidFrameRate) {
LOG_FATAL("Invalid frame rate provided - %s. --frame_rate requires a "
"positive integer less than 2^32.", optarg);
}
break;
default:
break;
}
}
LOG_INFO("Simple v4l2 decode.");
if (frames_to_decode != UINT_MAX)
LOG_INFO("Only decoding a max of %d frames.", frames_to_decode);
if (!file_name) {
print_help(argv[0]);
exit(1);
}
int drm_device_fd = bs_drm_open_main_display();
if (drm_device_fd < 0) {
LOG_FATAL("Failed to open card for display.");
}
struct gbm_device* gbm = gbm_create_device(drm_device_fd);
if (!gbm) {
close(drm_device_fd);
LOG_FATAL("Failed to create gbm device.");
}
init_bitstream(file_name);
char* output_file_prefix = NULL;
if (write_out) {
const char* extension = strrchr(file_name, '.');
const size_t len_prefix =
extension ? (size_t)(extension - file_name) : strlen(file_name);
output_file_prefix = strndup(file_name, len_prefix);
}
int v4lfd = open(kDecodeDevice, O_RDWR | O_NONBLOCK | O_CLOEXEC);
if (v4lfd < 0)
LOG_FATAL("Unable to open device file: %s.", kDecodeDevice);
query_driver(v4lfd);
if (!capabilities(v4lfd, kDecodeDevice, get_fourcc(), &uncompressed_fourcc))
LOG_FATAL("Not enough capabilities present for decoding.");
assert(uncompressed_fourcc != V4L2_PIX_FMT_INVALID);
struct queue OUTPUT_queue = {.v4lfd = v4lfd,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
.fourcc = get_fourcc(),
.num_planes = 1,
.memory = V4L2_MEMORY_MMAP,
.displayed_frames = 0,
.is_streaming = false};
int ret = setup_OUTPUT(&OUTPUT_queue, /*optional_width=*/NULL,
/*optional_height=*/NULL);
if (!ret)
ret = prime_OUTPUT(&OUTPUT_queue);
struct queue CAPTURE_queue = {.v4lfd = v4lfd,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
.fourcc = uncompressed_fourcc,
.num_planes = 1,
.memory = CAPTURE_memory,
.displayed_frames = 0,
.is_streaming = false};
if (!ret) {
ret = setup_CAPTURE(gbm, &CAPTURE_queue, modifier, frame_rate,
/*use_CAPTURE_queue_dimensions=*/false);
}
process_opaque_frame_function process_opaque_frame_fp = NULL;
int mdp_fd = -1;
struct queue* mdp_OUTPUT_queue = NULL;
struct queue* mdp_CAPTURE_queue = NULL;
if (!ret && uncompressed_fourcc == V4L2_PIX_FMT_MT21C) {
char fourcc[FOURCC_SIZE + 1];
fourcc_to_string(uncompressed_fourcc, fourcc);
LOG_INFO("Image Processor: needed, %s is an opaque format.", fourcc);
// MT21C is a specific opaque MediaTek format and seems, sadly,
// undocumented, forcing the use of a specific Image Processor (MediaTek's
// Media Data Path, or MDP) to produce a "clear" format. This format was
// controversial:
// https://groups.google.com/g/linux.kernel/c/4nijzmKfdak/m/pWUN7HQBBAAJ
mdp_fd = open(kImageProcessorDevice, O_RDWR | O_NONBLOCK | O_CLOEXEC);
if (mdp_fd < 0) {
LOG_FATAL("Image Processor: Unable to open dev file: %s.",
kImageProcessorDevice);
}
query_driver(mdp_fd);
// MDP claims to support (from v4l2-ctl):
// [0]: 'NM12' (Y/CbCr 4:2:0 (N-C))
// [1]: 'YM12' (Planar YUV 4:2:0 (N-C))
// [2]: 'YV12' (Planar YVU 4:2:0)
// Chrome uses V4L2_PIX_FMT_YVU420 (YV12).
uint32_t readable_fourcc = V4L2_PIX_FMT_YUV420M;
const uint32_t readable_fourcc_num_planes = 3;
if (!capabilities(mdp_fd, kImageProcessorDevice, uncompressed_fourcc,
&readable_fourcc)) {
LOG_FATAL("Image processor: Expected formats not supported.");
}
assert(readable_fourcc == V4L2_PIX_FMT_YUV420M);
char mdp_fourcc[FOURCC_SIZE + 1];
fourcc_to_string(readable_fourcc, mdp_fourcc);
LOG_INFO("Image Processor: OUTPUT format %s --> CAPTURE format: %s",
fourcc, mdp_fourcc);
// |mdp_OUTPUT_queue| takes decoded, opaque frames.
mdp_OUTPUT_queue = malloc(sizeof(struct queue));
mdp_OUTPUT_queue->v4lfd = mdp_fd;
mdp_OUTPUT_queue->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
mdp_OUTPUT_queue->fourcc = uncompressed_fourcc;
mdp_OUTPUT_queue->num_planes = 2;
mdp_OUTPUT_queue->memory = V4L2_MEMORY_DMABUF;
mdp_OUTPUT_queue->displayed_frames = 0;
mdp_OUTPUT_queue->is_streaming = false;
int ret = setup_OUTPUT(mdp_OUTPUT_queue, &CAPTURE_queue.coded_width,
&CAPTURE_queue.coded_height);
if (ret < 0)
LOG_FATAL("Image Processor: Unable to configure OUTPUT_queue.");
if (!apply_selection_to_queue(mdp_OUTPUT_queue, CAPTURE_queue.display_area))
LOG_FATAL("Image Processor: Unable to set SELECTION for OUTPUT_queue.");
// |mdp_CAPTURE_queue| has "clear" and fully mappable and readable frames.
mdp_CAPTURE_queue = malloc(sizeof(struct queue));
mdp_CAPTURE_queue->v4lfd = mdp_fd;
mdp_CAPTURE_queue->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
mdp_CAPTURE_queue->fourcc = readable_fourcc;
mdp_CAPTURE_queue->num_planes = readable_fourcc_num_planes;
mdp_CAPTURE_queue->memory = V4L2_MEMORY_MMAP;
mdp_CAPTURE_queue->display_area = CAPTURE_queue.display_area;
mdp_CAPTURE_queue->displayed_frames = 0;
mdp_CAPTURE_queue->is_streaming = false;
ret = setup_CAPTURE(gbm, mdp_CAPTURE_queue, DRM_FORMAT_MOD_NONE,
/*coded_frame_rate=*/kInvalidFrameRate,
/*use_CAPTURE_queue_dimensions=*/true);
if (ret < 0)
LOG_FATAL("Image Processor: Unable to configure CAPTURE_queue.");
process_opaque_frame_fp = &process_opaque_frame;
}
if (subscribe_to_event(v4lfd, V4L2_EVENT_SOURCE_CHANGE, 0 /* id */))
LOG_FATAL("Unable to subscribe to source change event.");
if (!ret) {
ret = decode(gbm, &CAPTURE_queue, &OUTPUT_queue, modifier,
output_file_prefix, frames_to_decode, print_md5hash,
process_opaque_frame_fp, mdp_OUTPUT_queue, mdp_CAPTURE_queue);
}
if (output_file_prefix)
free(output_file_prefix);
if (mdp_OUTPUT_queue)
free(mdp_OUTPUT_queue);
if (mdp_CAPTURE_queue)
free(mdp_CAPTURE_queue);
if (mdp_fd != -1)
close(mdp_fd);
cleanup_queue(&OUTPUT_queue);
cleanup_queue(&CAPTURE_queue);
close(v4lfd);
close(drm_device_fd);
free(file_name);
cleanup_bitstream();
return ret;
}