From ca08adb62fc418dbe35fc977e6cdc9df4de1f510 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kamil=20Trzci=C5=84ski?= Date: Sat, 29 Oct 2022 14:30:43 +0200 Subject: [PATCH] Add `-snapshot.height`, `-stream.height` and `-video.height` --- README.md | 26 ++- cmd/camera-streamer/main.c | 33 +++ cmd/camera-streamer/opts.c | 30 ++- device/camera/camera.c | 30 +-- device/camera/camera.h | 37 +-- device/camera/camera_decoder.c | 81 +++++++ device/camera/camera_input.c | 52 +---- device/camera/camera_isp.c | 57 +---- device/camera/camera_output.c | 214 +++++------------- device/camera/camera_pipeline.c | 85 +++++++ device/camera/camera_rescaller.c | 77 +++++++ device/device.c | 9 +- device/device.h | 3 +- device/links.c | 38 ++-- device/links.h | 3 +- device/v4l2/buffer_list.c | 43 ++-- html/index.html | 40 ++-- output/http_ffmpeg.c | 4 +- output/http_h264.c | 10 +- output/http_jpeg.c | 12 +- output/output.c | 8 +- output/output.h | 8 +- output/rtsp/rtsp.cc | 66 +----- output/webrtc/webrtc.cc | 53 +---- service/camera-streamer-arducam-16MP.service | 10 +- service/camera-streamer-arducam-64MP.service | 10 +- service/camera-streamer-raspi-v2-8MP.service | 10 +- service/camera-streamer-raspi-v3-12MP.service | 10 +- service/camera-streamer-usb-cam.service | 8 +- 29 files changed, 553 insertions(+), 514 deletions(-) create mode 100644 device/camera/camera_decoder.c create mode 100644 device/camera/camera_pipeline.c create mode 100644 device/camera/camera_rescaller.c diff --git a/README.md b/README.md index 5d004f4..fd50cb8 100644 --- a/README.md +++ b/README.md @@ -40,6 +40,22 @@ apt-get dist-upgrade reboot ``` +Ensure that your `/boot/config.txt` has enough of GPU memory (required for JPEG re-encoding): + +``` +# Example for IMX519 +dtoverlay=vc4-kms-v3d,cma-128 +gpu_mem=128 # preferred 160 or 256MB +dtoverlay=imx519 + +# Example for Arducam 64MP +gpu_mem=128 +dtoverlay=arducam_64mp,media-controller=1 + +# Example for USB cam +gpu_mem=128 +``` + ## Compile ```bash @@ -137,8 +153,13 
@@ All streams are exposed over very simple HTTP server, providing different stream Camera capture and resolution exposed is controlled by threee parameters: - `-camera-width` and `-camera-height` define capture resolution -- (ISP mode only) `-camera-high_res_factor` a default resolution exposed via HTTP (`exposed_width = camera_width / factor, exposed_height = camera_height / factor`) -- (ISP mode only) `-camera-low_res_factor` a low-resolution exposed via HTTP when `?res=low` is added (ex. `http://:8080/snapshot`) +- `-camera-video.height` - define height for an aspect ratio scaled resolution for `/video` and `/webrtc` (H264) output - this might require rescaller and might not always work +- `-camera-stream.height` - define height for an aspect ratio scaled resolution for `/stream` (MJPEG) output - this might require rescaller and might not always work +- `-camera-snapshot.height` - define height for an aspect ratio scaled resolution for `/snapshot` (JPEG) output - this might require rescaller and might not always work + +Any `video`, `stream` and `snapshot` might not work as this requires usually decoding, scaling, and encoding to achieve the desired resolution. + +This works ONLY BEST when using `libcamera`, the support for `USB` will varry and might require configuring `/boot/config.txt` to set enough of GPU memory to be able to re-encode JPEG. ## RTSP server @@ -205,6 +226,7 @@ and enabled in `imx519`. Focus can be manually controlled via `i2c-tools`: ```shell # /boot/config.txt dtoverlay=imx519,media-controller=0 +gpu_mem=160 # at least 128 # /etc/modules-load.d/modules.conf i2c-dev diff --git a/cmd/camera-streamer/main.c b/cmd/camera-streamer/main.c index 75d806b..05da0af 100644 --- a/cmd/camera-streamer/main.c +++ b/cmd/camera-streamer/main.c @@ -16,6 +16,36 @@ extern rtsp_options_t rtsp_options; camera_t *camera; +void deprecations() +{ + if (camera_options.high_res_factor > 0) { + printf("Using deprecated `-camera-high_res_factor`. 
Use `-camera-snapshot.height` instead."); + + if (!camera_options.snapshot.height) + camera_options.snapshot.height = camera_options.height / camera_options.high_res_factor; + } + if (camera_options.low_res_factor > 0) { + printf("Using deprecated `-camera-low_res_factor`. Use `-camera-stream.height` or `-camera-video.height` instead."); + + if (!camera_options.stream.height) + camera_options.stream.height = camera_options.height / camera_options.low_res_factor; + if (!camera_options.video.height) + camera_options.video.height = camera_options.height / camera_options.low_res_factor; + } +} + +void inherit() +{ + if (!camera_options.snapshot.height || camera_options.snapshot.height > camera_options.height) + camera_options.snapshot.height = camera_options.height; + + if (!camera_options.video.height || camera_options.video.height > camera_options.snapshot.height) + camera_options.video.height = camera_options.snapshot.height; + + if (!camera_options.stream.height || camera_options.stream.height > camera_options.video.height) + camera_options.stream.height = camera_options.video.height; +} + int main(int argc, char *argv[]) { int http_fd = -1; @@ -25,6 +55,9 @@ int main(int argc, char *argv[]) return -1; } + deprecations(); + inherit(); + if (camera_options.list_options) { camera = camera_open(&camera_options); if (camera) { diff --git a/cmd/camera-streamer/opts.c b/cmd/camera-streamer/opts.c index 7e7e56f..77ae39c 100644 --- a/cmd/camera-streamer/opts.c +++ b/cmd/camera-streamer/opts.c @@ -4,6 +4,7 @@ #include "util/opts/fourcc.h" #include "device/camera/camera.h" #include "output/rtsp/rtsp.h" +#include "output/output.h" camera_options_t camera_options = { .path = "", @@ -13,13 +14,19 @@ camera_options_t camera_options = { .nbufs = 3, .fps = 30, .allow_dma = true, - .high_res_factor = 1.0, + .high_res_factor = 0.0, .low_res_factor = 0.0, .auto_reconnect = 0, .auto_focus = true, .options = "", .list_options = false, - .h264 = { + .snapshot = { + .options = 
"compression_quality=80" + }, + .stream = { + .options = "compression_quality=80" + }, + .video = { .options = "video_bitrate_mode=0" OPTION_VALUE_LIST_SEP "video_bitrate=2000000" OPTION_VALUE_LIST_SEP @@ -29,10 +36,7 @@ camera_options_t camera_options = { "h264_profile=4" OPTION_VALUE_LIST_SEP "h264_minimum_qp_value=16" OPTION_VALUE_LIST_SEP "h264_maximum_qp_value=32" - }, - .jpeg = { - .options = "compression_quality=80" - }, + } }; http_server_options_t http_options = { @@ -93,8 +97,18 @@ option_t all_options[] = { DEFINE_OPTION_DEFAULT(camera, hflip, bool, "1", "Do horizontal image flip (does not work with all camera)."), DEFINE_OPTION_PTR(camera, isp.options, list, "Set the ISP processing options. List all available options with `-camera-list_options`."), - DEFINE_OPTION_PTR(camera, jpeg.options, list, "Set the JPEG compression options. List all available options with `-camera-list_options`."), - DEFINE_OPTION_PTR(camera, h264.options, list, "Set the H264 encoding options. List all available options with `-camera-list_options`."), + + DEFINE_OPTION_PTR(camera, snapshot.options, list, "Set the JPEG compression options. List all available options with `-camera-list_options`."), + DEFINE_OPTION(camera, snapshot.height, uint, "Override the snapshot height and maintain aspect ratio."), + + DEFINE_OPTION_DEFAULT(camera, stream.disabled, bool, "1", "Disable stream."), + DEFINE_OPTION_PTR(camera, stream.options, list, "Set the JPEG compression options. List all available options with `-camera-list_options`."), + DEFINE_OPTION(camera, stream.height, uint, "Override the stream height and maintain aspect ratio."), + + DEFINE_OPTION_DEFAULT(camera, video.disabled, bool, "1", "Disable video."), + DEFINE_OPTION_PTR(camera, video.options, list, "Set the H264 encoding options. 
List all available options with `-camera-list_options`."), + DEFINE_OPTION(camera, video.height, uint, "Override the video height and maintain aspect ratio."), + DEFINE_OPTION_DEFAULT(camera, list_options, bool, "1", "List all available options and exit."), DEFINE_OPTION(http, port, uint, "Set the HTTP web-server port."), diff --git a/device/camera/camera.c b/device/camera/camera.c index 6ff080d..f8d6aba 100644 --- a/device/camera/camera.c +++ b/device/camera/camera.c @@ -43,13 +43,15 @@ void camera_close(camera_t **camerap) for (int i = MAX_DEVICES; i-- > 0; ) { link_t *link = &camera->links[i]; - if (link->callbacks.on_buffer) { - link->callbacks.on_buffer(NULL); - link->callbacks.on_buffer = NULL; - } - if (link->callbacks.buf_lock) { - buffer_lock_capture(link->callbacks.buf_lock, NULL); - link->callbacks.buf_lock = NULL; + for (int j = 0; j < link->n_callbacks; j++) { + if (link->callbacks[j].on_buffer) { + link->callbacks[j].on_buffer(NULL); + link->callbacks[j].on_buffer = NULL; + } + if (link->callbacks[j].buf_lock) { + buffer_lock_capture(link->callbacks[j].buf_lock, NULL); + link->callbacks[j].buf_lock = NULL; + } } } @@ -86,10 +88,10 @@ void camera_capture_add_output(camera_t *camera, buffer_list_t *capture, buffer_ link->sinks[nsinks] = output; } -void camera_capture_set_callbacks(camera_t *camera, buffer_list_t *capture, link_callbacks_t callbacks) +void camera_capture_add_callbacks(camera_t *camera, buffer_list_t *capture, link_callbacks_t callbacks) { link_t *link = camera_ensure_capture(camera, capture); - link->callbacks = callbacks; + link->callbacks[link->n_callbacks++] = callbacks; if (callbacks.buf_lock) { callbacks.buf_lock->buf_list = capture; @@ -107,12 +109,10 @@ int camera_set_params(camera_t *camera) } // Set some defaults - for (int i = 0; i < 2; i++) { - device_set_option_list(camera->legacy_isp[i], camera->options.isp.options); - device_set_option_list(camera->codec_jpeg[i], camera->options.jpeg.options); - 
device_set_option_string(camera->codec_h264[i], "repeat_sequence_header", "1"); // required for force key support - device_set_option_list(camera->codec_h264[i], camera->options.h264.options); - } + device_set_option_list(camera->codec_snapshot, camera->options.snapshot.options); + device_set_option_list(camera->codec_stream, camera->options.stream.options); + device_set_option_string(camera->codec_video, "repeat_sequence_header", "1"); // required for force key support + device_set_option_list(camera->codec_video, camera->options.video.options); return 0; } diff --git a/device/camera/camera.h b/device/camera/camera.h index c185564..60ad814 100644 --- a/device/camera/camera.h +++ b/device/camera/camera.h @@ -4,6 +4,7 @@ #include "device/device.h" #define MAX_DEVICES 20 +#define MAX_RESCALLERS 4 #define MAX_HTTP_METHODS 20 #define CAMERA_DEVICE_CAMERA 0 @@ -14,6 +15,12 @@ typedef enum { CAMERA_LIBCAMERA } camera_type_t; +typedef struct camera_output_options_s { + bool disabled; + unsigned height; + char options[CAMERA_OPTIONS_LENGTH]; +} camera_output_options_t; + typedef struct camera_options_s { char path[256]; unsigned width, height, format; @@ -40,13 +47,9 @@ typedef struct camera_options_s { char options[CAMERA_OPTIONS_LENGTH]; } isp; - struct { - char options[CAMERA_OPTIONS_LENGTH]; - } jpeg; - - struct { - char options[CAMERA_OPTIONS_LENGTH]; - } h264; + camera_output_options_t snapshot; + camera_output_options_t stream; + camera_output_options_t video; } camera_options_t; typedef struct camera_s { @@ -60,9 +63,10 @@ typedef struct camera_s { device_t *camera; device_t *decoder; // decode JPEG/H264 into YUVU device_t *isp; - device_t *legacy_isp[2]; - device_t *codec_jpeg[2]; // encode YUVU into JPEG - device_t *codec_h264[2]; // encode YUVU into H264 + device_t *rescallers[3]; + device_t *codec_snapshot; + device_t *codec_stream; + device_t *codec_video; }; }; @@ -81,12 +85,13 @@ int camera_run(camera_t *camera); link_t *camera_ensure_capture(camera_t 
*camera, buffer_list_t *capture); void camera_capture_add_output(camera_t *camera, buffer_list_t *capture, buffer_list_t *output); -void camera_capture_set_callbacks(camera_t *camera, buffer_list_t *capture, link_callbacks_t callbacks); +void camera_capture_add_callbacks(camera_t *camera, buffer_list_t *capture, link_callbacks_t callbacks); int camera_configure_input(camera_t *camera); -int camera_configure_decoder(camera_t *camera, buffer_list_t *src_capture); -int camera_configure_output_rescaler(camera_t *camera, buffer_list_t *src_capture, float high_div, float low_div); -int camera_configure_output(camera_t *camera, buffer_list_t *src_capture, int res); +int camera_configure_pipeline(camera_t *camera, buffer_list_t *camera_capture); -int camera_configure_isp(camera_t *camera, buffer_list_t *src, float high_div, float low_div); -int camera_configure_legacy_isp(camera_t *camera, buffer_list_t *src, float div, int res); +buffer_list_t *camera_configure_isp(camera_t *camera, buffer_list_t *src_capture); +buffer_list_t *camera_configure_decoder(camera_t *camera, buffer_list_t *src_capture); +unsigned camera_rescaller_align_size(unsigned target_height); +buffer_list_t *camera_configure_rescaller(camera_t *camera, buffer_list_t *src_capture, const char *name, unsigned target_height, unsigned formats[]); +int camera_configure_output(camera_t *camera, const char *name, unsigned target_height, unsigned formats[], link_callbacks_t callbacks, device_t **device); diff --git a/device/camera/camera_decoder.c b/device/camera/camera_decoder.c new file mode 100644 index 0000000..8387cba --- /dev/null +++ b/device/camera/camera_decoder.c @@ -0,0 +1,81 @@ +#include "camera.h" + +#include "device/buffer.h" +#include "device/buffer_list.h" +#include "device/device.h" +#include "device/device_list.h" +#include "device/links.h" +#include "util/opts/log.h" +#include "util/opts/fourcc.h" +#include "device/buffer_list.h" +#include "util/http/http.h" +#include "output/rtsp/rtsp.h" 
+#include "output/output.h" + +static unsigned decoder_formats[] = +{ + // best quality + V4L2_PIX_FMT_YUYV, + + // medium quality + V4L2_PIX_FMT_YUV420, + V4L2_PIX_FMT_NV12, + + // low quality + V4L2_PIX_FMT_YVU420, + V4L2_PIX_FMT_NV21, + 0 +}; + +static void decoder_debug_on_buffer(buffer_t *buf) +{ + if (!buf) { + return; + } + + static int index = 0; + + char path[256]; + sprintf(path, "/tmp/decoder_capture.%d.%s", index++ % 10, fourcc_to_string(buf->buf_list->fmt.format).buf); + + FILE *fp = fopen(path, "wb"); + if (!fp) { + return; + } + + fwrite(buf->start, 1, buf->used, fp); + fclose(fp); +} + +static link_callbacks_t decoder_debug_callbacks = { + .name = "DECODER-DEBUG-CAPTURE", + .on_buffer = decoder_debug_on_buffer +}; + +buffer_list_t *camera_configure_decoder(camera_t *camera, buffer_list_t *src_capture) +{ + unsigned chosen_format = 0; + device_info_t *device = device_list_find_m2m_formats(camera->device_list, src_capture->fmt.format, decoder_formats, &chosen_format); + + if (!device) { + LOG_INFO(camera, "Cannot find '%s' decoder", fourcc_to_string(src_capture->fmt.format).buf); + return NULL; + } + + device_video_force_key(camera->camera); + + camera->decoder = device_v4l2_open("DECODER", device->path); + + buffer_list_t *decoder_output = device_open_buffer_list_output( + camera->decoder, src_capture); + buffer_list_t *decoder_capture = device_open_buffer_list_capture( + camera->decoder, NULL, decoder_output, 0, 0, chosen_format, true); + + if (getenv("CAMERA_DECODER_DEBUG")) { + camera_capture_add_callbacks(camera, decoder_capture, decoder_debug_callbacks); + } + + camera_capture_add_output(camera, src_capture, decoder_output); + + return decoder_capture; +} diff --git a/device/camera/camera_input.c b/device/camera/camera_input.c index 57b3bc1..e7f4662 100644 --- a/device/camera/camera_input.c +++ b/device/camera/camera_input.c @@ -32,47 +32,13 @@ static int camera_configure_input_v4l2(camera_t *camera) camera->camera->opts.allow_dma = false; } - 
buffer_list_t *camera_capture = device_open_buffer_list(camera->camera, true, camera->options.width, camera->options.height, camera->options.format, 0, camera->options.nbufs, true); + buffer_list_t *camera_capture = device_open_buffer_list(camera->camera, true, + camera->options.width, camera->options.height, camera->options.format, 0, camera->options.nbufs, true); if (!camera_capture) { return -1; } - camera_capture->do_timestamps = true; - if (camera->options.fps > 0) { - camera_capture->fmt.interval_us = 1000 * 1000 / camera->options.fps; - } - - switch (camera_capture->fmt.format) { - case V4L2_PIX_FMT_YUYV: - case V4L2_PIX_FMT_YVYU: - case V4L2_PIX_FMT_VYUY: - case V4L2_PIX_FMT_UYVY: - case V4L2_PIX_FMT_YUV420: - case V4L2_PIX_FMT_RGB565: - case V4L2_PIX_FMT_RGB24: - if (camera->options.high_res_factor > 1) { - // Use ISP, as there are two resolutions - return camera_configure_isp(camera, camera_capture, - camera->options.high_res_factor, camera->options.low_res_factor); - } else { - // Use direct approach, as there's likely low frequently used low resolution - return camera_configure_output_rescaler(camera, camera_capture, - camera->options.high_res_factor, camera->options.low_res_factor); - } - - case V4L2_PIX_FMT_MJPEG: - case V4L2_PIX_FMT_H264: - return camera_configure_decoder(camera, camera_capture); - - case V4L2_PIX_FMT_SRGGB10P: - return camera_configure_isp(camera, camera_capture, - camera->options.high_res_factor, camera->options.low_res_factor); - - default: - LOG_INFO(camera, "Unsupported camera format=%s", - fourcc_to_string(camera_capture->fmt.format).buf); - return -1; - } + return camera_configure_pipeline(camera, camera_capture); } static int camera_configure_input_libcamera(camera_t *camera) @@ -89,8 +55,8 @@ static int camera_configure_input_libcamera(camera_t *camera) buffer_list_t *camera_capture = device_open_buffer_list( camera->camera, true, - camera->options.width / camera->options.high_res_factor, - camera->options.height / 
camera->options.high_res_factor, + camera->options.width, + camera->options.height, camera->options.format, 0, camera->options.nbufs, @@ -99,14 +65,8 @@ static int camera_configure_input_libcamera(camera_t *camera) if (!camera_capture) { return -1; } - camera_capture->do_timestamps = true; - if (camera->options.fps > 0) { - camera_capture->fmt.interval_us = 1000 * 1000 / camera->options.fps; - } - - return camera_configure_output_rescaler(camera, camera_capture, - 1.0, camera->options.low_res_factor / camera->options.high_res_factor); + return camera_configure_pipeline(camera, camera_capture); } int camera_configure_input(camera_t *camera) diff --git a/device/camera/camera_isp.c b/device/camera/camera_isp.c index 5a3eab4..d5b695a 100644 --- a/device/camera/camera_isp.c +++ b/device/camera/camera_isp.c @@ -10,67 +10,16 @@ #include "device/buffer_list.h" #include "util/http/http.h" -int camera_configure_output_rescaler2(camera_t *camera, buffer_list_t *src_capture, float div, int res); - -int camera_configure_isp(camera_t *camera, buffer_list_t *src_capture, float high_div, float low_div) +buffer_list_t *camera_configure_isp(camera_t *camera, buffer_list_t *src_capture) { camera->isp = device_v4l2_open("ISP", "/dev/video13"); buffer_list_t *isp_output = device_open_buffer_list_output( camera->isp, src_capture); - buffer_list_t *isp_capture = device_open_buffer_list_capture2( - camera->isp, "/dev/video14", isp_output, high_div, V4L2_PIX_FMT_YUYV, true); - - camera_capture_add_output(camera, src_capture, isp_output); - - if (camera_configure_output(camera, isp_capture, 0) < 0) { - return -1; - } - -#if 1 - return camera_configure_output_rescaler2(camera, isp_capture, low_div, 1); -#else - if (low_div > 1) { - // TODO: Currently we cannot pull the data at the same time from /dev/video14 and /dev/video15 - // if only one path (consuming /dev/video15) is activated - buffer_list_t *isp_lowres_capture = device_open_buffer_list_capture2( - camera->isp, "/dev/video15", 
isp_output, low_div, V4L2_PIX_FMT_YUYV, true); - - if (camera_configure_output(camera, isp_lowres_capture, 1) < 0) { - return -1; - } - } - - return 0; -#endif -} - -static const char *isp_names[2] = { - "ISP", - "ISP-LOW" -}; - -int camera_configure_legacy_isp(camera_t *camera, buffer_list_t *src_capture, float div, int res) -{ - device_info_t *device = device_list_find_m2m_format(camera->device_list, src_capture->fmt.format, V4L2_PIX_FMT_YUYV); - - if (!device) { - LOG_INFO(camera, "Cannot find ISP to scale from '%s' to 'YUYV'", fourcc_to_string(src_capture->fmt.format).buf); - return -1; - } - - camera->legacy_isp[res] = device_v4l2_open(isp_names[res], device->path); - - buffer_list_t *isp_output = device_open_buffer_list_output( - camera->legacy_isp[res], src_capture); buffer_list_t *isp_capture = device_open_buffer_list_capture( - camera->legacy_isp[res], isp_output, div, V4L2_PIX_FMT_YUYV, true); + camera->isp, "/dev/video14", isp_output, 0, 0, V4L2_PIX_FMT_YUYV, true); camera_capture_add_output(camera, src_capture, isp_output); - if (camera_configure_output(camera, isp_capture, res) < 0) { - return -1; - } - - return 0; + return isp_capture; } diff --git a/device/camera/camera_output.c b/device/camera/camera_output.c index 4d4bc25..10d52c5 100644 --- a/device/camera/camera_output.c +++ b/device/camera/camera_output.c @@ -12,196 +12,94 @@ #include "output/rtsp/rtsp.h" #include "output/output.h" -static const char *jpeg_names[2] = { - "JPEG", - "JPEG-LOW" -}; - -static link_callbacks_t jpeg_callbacks[2] = { - { .name = "JPEG-CAPTURE", .buf_lock = &http_jpeg }, - { .name = "JPEG-LOW-CAPTURE", .buf_lock = &http_jpeg_lowres } -}; - -static const char *h264_names[2] = { - "H264", - "H264-LOW" -}; - -static link_callbacks_t h264_callbacks[2] = { - { .name = "H264-CAPTURE", .buf_lock = &http_h264 }, - { .name = "H264-LOW-CAPTURE", .buf_lock = &http_h264_lowres } -}; - -static int camera_configure_h264_output(camera_t *camera, buffer_list_t *src_capture, int res) 
+static bool camera_output_matches_capture(buffer_list_t *capture, unsigned target_height, unsigned formats[]) { - if (src_capture->fmt.format == V4L2_PIX_FMT_H264) { - camera_capture_set_callbacks(camera, src_capture, h264_callbacks[res]); - return 0; + if (target_height && capture->fmt.height != target_height && capture->fmt.height != camera_rescaller_align_size(target_height)) { + return false; } - device_info_t *device = device_list_find_m2m_format(camera->device_list, src_capture->fmt.format, V4L2_PIX_FMT_H264); - - if (!device) { - LOG_INFO(camera, "Cannot find H264 encoder to convert from '%s'", fourcc_to_string(src_capture->fmt.format).buf); - return -1; + for (int i = 0; formats[i]; i++) { + if (formats[i] == capture->fmt.format) + return true; } - camera->codec_h264[res] = device_v4l2_open(h264_names[res], device->path); - - buffer_list_t *output = device_open_buffer_list_output(camera->codec_h264[res], src_capture); - buffer_list_t *capture = device_open_buffer_list_capture(camera->codec_h264[res], output, 1.0, V4L2_PIX_FMT_H264, true); - - if (!capture) { - return -1; - } - - camera_capture_add_output(camera, src_capture, output); - camera_capture_set_callbacks(camera, capture, h264_callbacks[res]); - return 0; + return false; } -static int camera_configure_jpeg_output(camera_t *camera, buffer_list_t *src_capture, int res) +static buffer_list_t *camera_find_capture(camera_t *camera, unsigned target_height, unsigned formats[]) { - if (src_capture->fmt.format == V4L2_PIX_FMT_MJPEG || src_capture->fmt.format == V4L2_PIX_FMT_JPEG) { - camera_capture_set_callbacks(camera, src_capture, jpeg_callbacks[res]); - return 0; - } + for (int i = 0; i < MAX_DEVICES; i++) { + if (!camera->devices[i]) + continue; - device_info_t *device = device_list_find_m2m_format(camera->device_list, src_capture->fmt.format, V4L2_PIX_FMT_JPEG); + device_t *device = camera->devices[i]; + for (int j = 0; j < device->n_capture_list; j++) { + buffer_list_t *capture_list = 
device->capture_lists[j]; - if (!device) { - device = device_list_find_m2m_format(camera->device_list, src_capture->fmt.format, V4L2_PIX_FMT_MJPEG); - } - - if (!device) { - LOG_INFO(camera, "Cannot find JPEG encoder to convert from '%s'", fourcc_to_string(src_capture->fmt.format).buf); - return -1; - } - - camera->codec_jpeg[res] = device_v4l2_open(jpeg_names[res], device->path); - - buffer_list_t *output = device_open_buffer_list_output(camera->codec_jpeg[res], src_capture); - buffer_list_t *capture = device_open_buffer_list_capture(camera->codec_jpeg[res], output, 1.0, V4L2_PIX_FMT_JPEG, true); - - if (!capture) { - return -1; - } - - camera_capture_add_output(camera, src_capture, output); - camera_capture_set_callbacks(camera, capture, jpeg_callbacks[res]); - return 0; -} - -int camera_configure_output(camera_t *camera, buffer_list_t *src_capture, int res) -{ - if (camera_configure_h264_output(camera, src_capture, res) < 0 || - camera_configure_jpeg_output(camera, src_capture, res) < 0) { - return -1; + if (camera_output_matches_capture(capture_list, target_height, formats)) { + return capture_list; + } } + } - return 0; + return NULL; } -int camera_configure_output_rescaler2(camera_t *camera, buffer_list_t *src_capture, float div, int res) +static unsigned rescalled_formats[] = { - if (div > 1) { - return camera_configure_legacy_isp(camera, src_capture, div, res); - } else if (div > 0) { - return camera_configure_output(camera, src_capture, 0); - } else { - return 0; - } -} + // best quality + V4L2_PIX_FMT_YUYV, -int camera_configure_output_rescaler(camera_t *camera, buffer_list_t *src_capture, float high_div, float low_div) -{ - if (camera_configure_output_rescaler2(camera, src_capture, high_div, 0) < 0 || - camera_configure_output_rescaler2(camera, src_capture, low_div, 1) < 0) { - return -1; - } + // medium quality + V4L2_PIX_FMT_YUV420, + V4L2_PIX_FMT_NV12, - return 0; -} + // low quality + V4L2_PIX_FMT_NV21, + V4L2_PIX_FMT_YVU420, -static void 
decoder_debug_on_buffer(buffer_t *buf) { - if (!buf) { - return; - } - - static int index = 0; - - char path[256]; - sprintf(path, "/tmp/decoder_capture.%d.%s", index++ % 10, fourcc_to_string(buf->buf_list->fmt.format).buf); - - FILE *fp = fopen(path, "wb"); - if (!fp) { - return; - } - - fwrite(buf->start, 1, buf->used, fp); - fclose(fp); -} - -static link_callbacks_t decoder_debug_callbacks = { - .name = "DECODER-DEBUG-CAPTURE", - .on_buffer = decoder_debug_on_buffer + 0 }; -int camera_configure_decoder(camera_t *camera, buffer_list_t *src_capture) +int camera_configure_output(camera_t *camera, const char *name, unsigned target_height, unsigned formats[], link_callbacks_t callbacks, device_t **device) { - unsigned decode_formats[] = { - // best quality - V4L2_PIX_FMT_YUYV, + buffer_list_t *src_capture = camera_find_capture(camera, target_height, formats); + if (src_capture) { + camera_capture_add_callbacks(camera, src_capture, callbacks); + return 0; + } - // medium quality - V4L2_PIX_FMT_YUV420, - V4L2_PIX_FMT_NV12, + src_capture = camera_find_capture(camera, target_height, rescalled_formats); + if (!src_capture) { + // Try to find re-scallabe output + src_capture = camera_find_capture(camera, 0, rescalled_formats); + if (src_capture) { + src_capture = camera_configure_rescaller(camera, src_capture, name, target_height, rescalled_formats); + } + } + + if (!src_capture) { + return -1; + } - // low quality - V4L2_PIX_FMT_NV21, - V4L2_PIX_FMT_YVU420, - 0 - }; unsigned chosen_format = 0; - device_info_t *device = device_list_find_m2m_formats(camera->device_list, src_capture->fmt.format, decode_formats, &chosen_format); + device_info_t *device_info = device_list_find_m2m_formats(camera->device_list, src_capture->fmt.format, formats, &chosen_format); - if (!device) { - LOG_INFO(camera, "Cannot find '%s' decoder", fourcc_to_string(src_capture->fmt.format).buf); + if (!device_info) { + LOG_INFO(camera, "Cannot find encoder to convert from '%s'", 
fourcc_to_string(src_capture->fmt.format).buf); return -1; } - device_video_force_key(camera->camera); + *device = device_v4l2_open(name, device_info->path); - camera->decoder = device_v4l2_open("DECODER", device->path); + buffer_list_t *output = device_open_buffer_list_output(*device, src_capture); + buffer_list_t *capture = device_open_buffer_list_capture(*device, NULL, output, 0, 0, chosen_format, true); - buffer_list_t *decoder_output = device_open_buffer_list_output( - camera->decoder, src_capture); - buffer_list_t *decoder_capture = device_open_buffer_list_capture( - camera->decoder, decoder_output, 1.0, chosen_format, true); - - if (getenv("CAMERA_DECODER_DEBUG")) { - camera_capture_set_callbacks(camera, decoder_capture, decoder_debug_callbacks); - } - - camera_capture_add_output(camera, src_capture, decoder_output); - - if (camera->options.high_res_factor <= 1 && (src_capture->fmt.format == V4L2_PIX_FMT_JPEG || src_capture->fmt.format == V4L2_PIX_FMT_MJPEG)) { - camera_capture_set_callbacks(camera, src_capture, jpeg_callbacks[0]); - - if (camera_configure_h264_output(camera, decoder_capture, 0) < 0) - return -1; - } else if (camera->options.high_res_factor <= 1 && src_capture->fmt.format == V4L2_PIX_FMT_H264) { - camera_capture_set_callbacks(camera, src_capture, h264_callbacks[0]); - - if (camera_configure_jpeg_output(camera, decoder_capture, 0) < 0) - return -1; - } else if (camera_configure_output_rescaler2(camera, decoder_capture, camera->options.high_res_factor, 0) < 0) { - return -1; - } - - if (camera->options.low_res_factor > 1 && camera_configure_output_rescaler2(camera, decoder_capture, camera->options.low_res_factor, 1) < 0) { + if (!capture) { return -1; } + camera_capture_add_output(camera, src_capture, output); + camera_capture_add_callbacks(camera, capture, callbacks); return 0; } diff --git a/device/camera/camera_pipeline.c b/device/camera/camera_pipeline.c new file mode 100644 index 0000000..762bd73 --- /dev/null +++ 
b/device/camera/camera_pipeline.c @@ -0,0 +1,85 @@ +#include "camera.h" + +#include "device/buffer.h" +#include "device/buffer_list.h" +#include "device/device.h" +#include "device/device_list.h" +#include "device/links.h" +#include "util/opts/log.h" +#include "util/opts/fourcc.h" +#include "device/buffer_list.h" +#include "util/http/http.h" +#include "output/output.h" + +static unsigned snapshot_formats[] = +{ + V4L2_PIX_FMT_MJPEG, + V4L2_PIX_FMT_JPEG, + 0 +}; + +static link_callbacks_t snapshot_callbacks = +{ + .name = "SNAPSHOT-CAPTURE", + .buf_lock = &snapshot_lock +}; + +static link_callbacks_t stream_callbacks = +{ + .name = "STREAM-CAPTURE", + .buf_lock = &stream_lock +}; + +static unsigned video_formats[] = +{ + V4L2_PIX_FMT_H264, + 0 +}; + +static link_callbacks_t video_callbacks = +{ + .name = "VIDEO-CAPTURE", + .buf_lock = &video_lock +}; + +int camera_configure_pipeline(camera_t *camera, buffer_list_t *capture) +{ + if (capture) { + capture->do_timestamps = true; + + if (camera->options.fps > 0) { + capture->fmt.interval_us = 1000 * 1000 / camera->options.fps; + } + + switch (capture->fmt.format) { + case V4L2_PIX_FMT_SRGGB10P: + case V4L2_PIX_FMT_SGRBG10P: + case V4L2_PIX_FMT_SRGGB10: + case V4L2_PIX_FMT_SGRBG10: + camera_configure_isp(camera, capture); + break; + + case V4L2_PIX_FMT_MJPEG: + case V4L2_PIX_FMT_H264: + camera_configure_decoder(camera, capture); + break; + } + } + + if (!camera->options.snapshot.disabled && camera_configure_output( + camera, "SNAPSHOT", camera->options.snapshot.height, snapshot_formats, snapshot_callbacks, &camera->codec_snapshot) < 0) { + return -1; + } + + if (!camera->options.stream.disabled && camera_configure_output( + camera, "STREAM", camera->options.stream.height, snapshot_formats, stream_callbacks, &camera->codec_stream) < 0) { + return -1; + } + + if (!camera->options.video.disabled && camera_configure_output( + camera, "VIDEO", camera->options.video.height, video_formats, video_callbacks, &camera->codec_video) 
< 0) { + return -1; + } + + return 0; +} diff --git a/device/camera/camera_rescaller.c b/device/camera/camera_rescaller.c new file mode 100644 index 0000000..addc399 --- /dev/null +++ b/device/camera/camera_rescaller.c @@ -0,0 +1,77 @@ +#include "camera.h" + +#include "device/buffer.h" +#include "device/buffer_list.h" +#include "device/device.h" +#include "device/device_list.h" +#include "device/links.h" +#include "util/opts/log.h" +#include "util/opts/fourcc.h" +#include "device/buffer_list.h" +#include "util/http/http.h" + +unsigned camera_rescaller_align_size(unsigned size) +{ + return (size + 31) / 32 * 32; +} + +buffer_list_t *camera_try_rescaller(camera_t *camera, buffer_list_t *src_capture, const char *name, unsigned target_height, unsigned target_format) +{ + device_info_t *device_info = device_list_find_m2m_format(camera->device_list, src_capture->fmt.format, target_format); + + if (!device_info) { + return NULL; + } + + if (target_height > src_capture->fmt.height) { + LOG_INFO(src_capture, "Upscaling from %dp to %dp does not make sense.", + src_capture->fmt.height, target_height); + return NULL; + } + + target_height = camera_rescaller_align_size(target_height); + unsigned target_width = target_height * src_capture->fmt.width / src_capture->fmt.height; + target_width = camera_rescaller_align_size(target_width); + + char name2[256]; + sprintf(name2, "RESCALLER:%s", name); + + device_t *device = device_v4l2_open(name2, device_info->path); + + buffer_list_t *rescaller_output = device_open_buffer_list_output( + device, src_capture); + buffer_list_t *rescaller_capture = device_open_buffer_list_capture( + device, NULL, rescaller_output, + target_width, target_height, target_format, true); + + if (!rescaller_capture) { + device_close(device); + return NULL; + } + + camera_capture_add_output(camera, src_capture, rescaller_output); + return rescaller_capture; +} + +buffer_list_t *camera_configure_rescaller(camera_t *camera, buffer_list_t *src_capture, const char 
*name, unsigned target_height, unsigned formats[]) +{ + int rescallers = 0; + for ( ; rescallers < MAX_RESCALLERS && camera->rescallers[rescallers]; rescallers++); + if (rescallers == MAX_RESCALLERS) { + return NULL; + } + + buffer_list_t *rescaller_capture = NULL; // NOTE(review): same-format pass-through intentionally disabled? was: camera_try_rescaller(camera, src_capture, name, target_height, src_capture->fmt.format); only formats[] candidates are tried below + + for (int i = 0; !rescaller_capture && formats[i]; i++) { + rescaller_capture = camera_try_rescaller(camera, src_capture, name, target_height, formats[i]); + } + + if (!rescaller_capture) { + LOG_INFO(src_capture, "Cannot find rescaller to scale from '%s' to any of the requested formats", fourcc_to_string(src_capture->fmt.format).buf); + return NULL; + } + + camera->rescallers[rescallers] = rescaller_capture->dev; + return rescaller_capture; +} diff --git a/device/device.c b/device/device.c index a85e726..df41e15 100644 --- a/device/device.c +++ b/device/device.c @@ -121,19 +121,14 @@ buffer_list_t *device_open_buffer_list_output(device_t *dev, buffer_list_t *capt capture_list->dev->opts.allow_dma ? !capture_list->do_mmap : true); } -buffer_list_t *device_open_buffer_list_capture(device_t *dev, buffer_list_t *output_list, float div, unsigned format, bool do_mmap) -{ - return device_open_buffer_list_capture2(dev, NULL, output_list, div, format, do_mmap); -} - -buffer_list_t *device_open_buffer_list_capture2(device_t *dev, const char *path, buffer_list_t *output_list, float div, unsigned format, bool do_mmap) +buffer_list_t *device_open_buffer_list_capture(device_t *dev, const char *path, buffer_list_t *output_list, unsigned width, unsigned height, unsigned format, bool do_mmap) { if (!dev || !output_list) { return NULL; } return device_open_buffer_list2(dev, path, true, - output_list->fmt.width / div, output_list->fmt.height / div, + width ? width : output_list->fmt.width, height ? 
height : output_list->fmt.height, format, 0, output_list->nbufs, do_mmap); } diff --git a/device/device.h b/device/device.h index 8fb9556..e2e8112 100644 --- a/device/device.h +++ b/device/device.h @@ -60,8 +60,7 @@ void device_close(device_t *dev); buffer_list_t *device_open_buffer_list(device_t *dev, bool do_capture, unsigned width, unsigned height, unsigned format, unsigned bytesperline, int nbufs, bool do_mmap); buffer_list_t *device_open_buffer_list2(device_t *dev, const char *path, bool do_capture, unsigned width, unsigned height, unsigned format, unsigned bytesperline, int nbufs, bool do_mmap); buffer_list_t *device_open_buffer_list_output(device_t *dev, buffer_list_t *capture_list); -buffer_list_t *device_open_buffer_list_capture(device_t *dev, buffer_list_t *output_list, float div, unsigned format, bool do_mmap); -buffer_list_t *device_open_buffer_list_capture2(device_t *dev, const char *path, buffer_list_t *output_list, float div, unsigned format, bool do_mmap); +buffer_list_t *device_open_buffer_list_capture(device_t *dev, const char *path, buffer_list_t *output_list, unsigned width, unsigned height, unsigned format, bool do_mmap); int device_consume_event(device_t *dev); int device_set_stream(device_t *dev, bool do_on); diff --git a/device/links.c b/device/links.c index 447d416..9c35206 100644 --- a/device/links.c +++ b/device/links.c @@ -34,12 +34,14 @@ int _build_fds(link_t *all_links, struct pollfd *fds, link_t **links, buffer_lis bool paused = true; - if (link->callbacks.check_streaming && link->callbacks.check_streaming()) { - paused = false; - } + for (int j = 0; j < link->n_callbacks; j++) { + if (link->callbacks[j].check_streaming && link->callbacks[j].check_streaming()) { + paused = false; + } - if (link->callbacks.buf_lock && buffer_lock_needs_buffer(link->callbacks.buf_lock)) { - paused = false; + if (link->callbacks[j].buf_lock && buffer_lock_needs_buffer(link->callbacks[j].buf_lock)) { + paused = false; + } } for (int j = 0; link->sinks[j]; 
j++) { @@ -127,9 +129,11 @@ int links_enqueue_from_source(buffer_list_t *buf_list, link_t *link) LOG_ERROR(buf_list, "No buffer dequeued from source?"); } - if (link->callbacks.validate_buffer && !link->callbacks.validate_buffer(link, buf)) { - LOG_DEBUG(buf_list, "Buffer rejected by validation"); - return 0; + for (int j = 0; j < link->n_callbacks; j++) { + if (link->callbacks[j].validate_buffer && !link->callbacks[j].validate_buffer(link, buf)) { + LOG_DEBUG(buf_list, "Buffer rejected by validation"); + return 0; + } } for (int j = 0; link->sinks[j]; j++) { @@ -142,12 +146,14 @@ int links_enqueue_from_source(buffer_list_t *buf_list, link_t *link) buffer_list_enqueue(link->sinks[j], buf); } - if (link->callbacks.on_buffer) { - link->callbacks.on_buffer(buf); - } + for (int j = 0; j < link->n_callbacks; j++) { + if (link->callbacks[j].on_buffer) { + link->callbacks[j].on_buffer(buf); + } - if (link->callbacks.buf_lock) { - buffer_lock_capture(link->callbacks.buf_lock, buf); + if (link->callbacks[j].buf_lock) { + buffer_lock_capture(link->callbacks[j].buf_lock, buf); + } } return 0; @@ -346,10 +352,10 @@ void links_dump(link_t *all_links) links_dump_buf_list(line, link->sinks[j]); } - if (link->callbacks.name) { - if (link->sinks[0]) + for (int j = 0; j < link->n_callbacks; j++) { + if (link->sinks[0] || j > 0) strcat(line, ", "); - strcat(line, link->callbacks.name); + strcat(line, link->callbacks[j].name); } strcat(line, "]"); diff --git a/device/links.h b/device/links.h index 732daa4..eabfb5f 100644 --- a/device/links.h +++ b/device/links.h @@ -25,7 +25,8 @@ typedef struct link_callbacks_s { typedef struct link_s { buffer_list_t *source; // capture_list buffer_list_t *sinks[10]; - link_callbacks_t callbacks; + link_callbacks_t callbacks[10]; + int n_callbacks; } link_t; int links_init(link_t *all_links); diff --git a/device/v4l2/buffer_list.c b/device/v4l2/buffer_list.c index fa81f3a..49fb8ac 100644 --- a/device/v4l2/buffer_list.c +++ b/device/v4l2/buffer_list.c 
@@ -59,26 +59,24 @@ int v4l2_buffer_list_open(buffer_list_t *buf_list) v4l2_fmt.type = buf_list->v4l2->type; buffer_format_t fmt = buf_list->fmt; - unsigned block_size = 1; + unsigned block_width = 1, block_height = 1; - // JPEG is in 16x16 blocks (shrink image to fit) (but adapt to 32x32) - // And ISP output - if (strstr(buf_list->name, "JPEG")) { - block_size = 32; - } else if (buf_list->do_capture && strstr(buf_list->name, "ISP")) { - block_size = 32; - } else if (strstr(buf_list->name, "H264")) { - // TODO: even though H264 encoder on RPI requires 32x32 - // it appears that it breaks encoding creating a bar at top - // block_size = 32; + if (buf_list->do_capture && strstr(buf_list->name, "RESCALLER")) { + block_width = 32; + block_height = 32; } - if (block_size > 1) { + LOG_DEBUG(buf_list, "Get current format ..."); + ERR_IOCTL(buf_list, buf_list->v4l2->dev_fd, VIDIOC_G_FMT, &v4l2_fmt, "Can't get format"); + +retry_resolution_set: + + if (block_width > 1 || block_height > 1) { buffer_format_t org_fmt = buf_list->fmt; - fmt.width = shrink_to_block(fmt.width, block_size); - fmt.height = shrink_to_block(fmt.height, block_size); + fmt.width = shrink_to_block(fmt.width, block_width); + fmt.height = shrink_to_block(fmt.height, block_height); LOG_VERBOSE(buf_list, "Adapting size to %dx%d block: %dx%d shrunk to %dx%d", - block_size, block_size, + block_width, block_height, org_fmt.width, org_fmt.height, fmt.width, fmt.height); } @@ -86,9 +84,6 @@ int v4l2_buffer_list_open(buffer_list_t *buf_list) fmt.bytesperline = 0; } - LOG_DEBUG(buf_list, "Get current format ..."); - ERR_IOCTL(buf_list, buf_list->v4l2->dev_fd, VIDIOC_G_FMT, &v4l2_fmt, "Can't set format"); - if (buf_list->v4l2->do_mplanes) { v4l2_fmt.fmt.pix_mp.colorspace = V4L2_COLORSPACE_JPEG; if (fmt.width) @@ -131,9 +126,17 @@ int v4l2_buffer_list_open(buffer_list_t *buf_list) if (buf_list->fmt.width != fmt.width || buf_list->fmt.height != fmt.height) { if (fmt.bytesperline) { - LOG_ERROR(buf_list, "Requested 
resolution=%ux%u is unavailable. Got %ux%u. " - "Consider using the `-camera-high_res_factor=2` or `-camera-low_res_factor=3`", + LOG_INFO(buf_list, "Requested resolution=%ux%u is unavailable. Got %ux%u.", fmt.width, fmt.height, buf_list->fmt.width, buf_list->fmt.height); + + if (block_height > 1) { + goto error; + } + + // Try to shrink resolution + block_width = 32; + block_height = 32; + goto retry_resolution_set; } else { LOG_INFO(buf_list, "Requested resolution=%ux%u is unavailable. Got %ux%u. Accepted", fmt.width, fmt.height, buf_list->fmt.width, buf_list->fmt.height); diff --git a/html/index.html b/html/index.html index 1d1d626..415f6e5 100644 --- a/html/index.html +++ b/html/index.html @@ -11,44 +11,56 @@

diff --git a/output/http_ffmpeg.c b/output/http_ffmpeg.c index 1336346..bf5a764 100644 --- a/output/http_ffmpeg.c +++ b/output/http_ffmpeg.c @@ -10,8 +10,6 @@ #include "device/device.h" #include "util/ffmpeg/remuxer.h" -buffer_lock_t *http_h264_buffer_for_res(http_worker_t *worker); - static const char *const VIDEO_HEADER = "HTTP/1.0 200 OK\r\n" "Access-Control-Allow-Origin: *\r\n" @@ -135,7 +133,7 @@ static void http_ffmpeg_video(http_worker_t *worker, FILE *stream, const char *c #endif int n = buffer_lock_write_loop( - http_h264_buffer_for_res(worker), + &video_lock, 0, 0, (buffer_write_fn)http_ffmpeg_video_buf_part, diff --git a/output/http_h264.c b/output/http_h264.c index 90a8a3a..8ec085e 100644 --- a/output/http_h264.c +++ b/output/http_h264.c @@ -16,14 +16,6 @@ static const char *const VIDEO_HEADER = "Content-Type: application/octet-stream\r\n" "\r\n"; -buffer_lock_t *http_h264_buffer_for_res(http_worker_t *worker) -{ - if (strstr(worker->request_params, HTTP_LOW_RES_PARAM) && http_h264_lowres.buf_list) - return &http_h264_lowres; - else - return &http_h264; -} - typedef struct { FILE *stream; bool wrote_header; @@ -60,7 +52,7 @@ void http_h264_video(http_worker_t *worker, FILE *stream) { http_video_status_t status = { stream }; - int n = buffer_lock_write_loop(http_h264_buffer_for_res(worker), 0, 0, (buffer_write_fn)http_video_buf_part, &status); + int n = buffer_lock_write_loop(&video_lock, 0, 0, (buffer_write_fn)http_video_buf_part, &status); if (status.wrote_header) { return; diff --git a/output/http_jpeg.c b/output/http_jpeg.c index ded1cb3..897c749 100644 --- a/output/http_jpeg.c +++ b/output/http_jpeg.c @@ -20,14 +20,6 @@ static const char *const STREAM_PART = "Content-Type: " CONTENT_TYPE "\r\n" CONT static const char *const STREAM_BOUNDARY = "\r\n" "--" PART_BOUNDARY "\r\n"; -buffer_lock_t *http_jpeg_buffer_for_res(http_worker_t *worker) -{ - if (strstr(worker->request_params, HTTP_LOW_RES_PARAM) && http_jpeg_lowres.buf_list) - return 
&http_jpeg_lowres; - else - return &http_jpeg; -} - int http_snapshot_buf_part(buffer_lock_t *buf_lock, buffer_t *buf, int frame, FILE *stream) { fprintf(stream, "HTTP/1.1 200 OK\r\n"); @@ -40,7 +32,7 @@ int http_snapshot_buf_part(buffer_lock_t *buf_lock, buffer_t *buf, int frame, FI void http_snapshot(http_worker_t *worker, FILE *stream) { - int n = buffer_lock_write_loop(http_jpeg_buffer_for_res(worker), 1, 0, (buffer_write_fn)http_snapshot_buf_part, stream); + int n = buffer_lock_write_loop(&snapshot_lock, 1, 0, (buffer_write_fn)http_snapshot_buf_part, stream); if (n <= 0) { http_500(stream, NULL); @@ -68,7 +60,7 @@ int http_stream_buf_part(buffer_lock_t *buf_lock, buffer_t *buf, int frame, FILE void http_stream(http_worker_t *worker, FILE *stream) { - int n = buffer_lock_write_loop(http_jpeg_buffer_for_res(worker), 0, 0, (buffer_write_fn)http_stream_buf_part, stream); + int n = buffer_lock_write_loop(&stream_lock, 0, 0, (buffer_write_fn)http_stream_buf_part, stream); if (n == 0) { http_500(stream, NULL); diff --git a/output/output.c b/output/output.c index 5f84c35..c19aa25 100644 --- a/output/output.c +++ b/output/output.c @@ -1,7 +1,5 @@ #include "device/buffer_lock.h" -DEFINE_BUFFER_LOCK(http_h264, 0); -DEFINE_BUFFER_LOCK(http_h264_lowres, 0); - -DEFINE_BUFFER_LOCK(http_jpeg, 1000); -DEFINE_BUFFER_LOCK(http_jpeg_lowres, 1000); +DEFINE_BUFFER_LOCK(snapshot_lock, 1000); +DEFINE_BUFFER_LOCK(stream_lock, 0); +DEFINE_BUFFER_LOCK(video_lock, 0); diff --git a/output/output.h b/output/output.h index 6b98e44..2669e20 100644 --- a/output/output.h +++ b/output/output.h @@ -5,11 +5,9 @@ struct http_worker_s; struct buffer_s; -extern struct buffer_lock_s http_h264; -extern struct buffer_lock_s http_h264_lowres; - -extern struct buffer_lock_s http_jpeg; -extern struct buffer_lock_s http_jpeg_lowres; +extern struct buffer_lock_s snapshot_lock; +extern struct buffer_lock_s stream_lock; +extern struct buffer_lock_s video_lock; // M-JPEG void http_snapshot(struct http_worker_s 
*worker, FILE *stream); diff --git a/output/rtsp/rtsp.cc b/output/rtsp/rtsp.cc index 404cc46..9207b24 100644 --- a/output/rtsp/rtsp.cc +++ b/output/rtsp/rtsp.cc @@ -26,13 +26,12 @@ static pthread_mutex_t rtsp_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP; static class DynamicH264Stream *rtsp_streams; static const char *stream_name = "stream.h264"; -static const char *stream_low_res_name = "stream_low_res.h264"; class DynamicH264Stream : public FramedSource { public: - DynamicH264Stream(UsageEnvironment& env, Boolean lowResMode) - : FramedSource(env), fHaveStartedReading(False), fLowResMode(lowResMode) + DynamicH264Stream(UsageEnvironment& env) + : FramedSource(env), fHaveStartedReading(False) { } @@ -65,16 +64,12 @@ public: pthread_mutex_unlock(&rtsp_lock); } - void receiveData(buffer_t *buf, bool lowResMode) + void receiveData(buffer_t *buf) { if (!isCurrentlyAwaitingData()) { return; // we're not ready for the data yet } - if (fLowResMode != lowResMode) { - return; - } - if (buf->flags.is_keyframe) { fHadKeyFrame = true; } @@ -109,7 +104,6 @@ public: Boolean fHaveStartedReading; Boolean fHadKeyFrame; Boolean fRequestedKeyFrame; - Boolean fLowResMode; DynamicH264Stream *pNextStream; }; @@ -117,23 +111,21 @@ public: class DynamicH264VideoFileServerMediaSubsession : public OnDemandServerMediaSubsession { public: - DynamicH264VideoFileServerMediaSubsession(UsageEnvironment& env, Boolean reuseFirstSource, Boolean lowResMode) - : OnDemandServerMediaSubsession(env, reuseFirstSource), fLowResMode(lowResMode) + DynamicH264VideoFileServerMediaSubsession(UsageEnvironment& env, Boolean reuseFirstSource) + : OnDemandServerMediaSubsession(env, reuseFirstSource) { } virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate) { estBitrate = 500; // kbps, estimate - return H264VideoStreamFramer::createNew(envir(), new DynamicH264Stream(envir(), fLowResMode)); + return H264VideoStreamFramer::createNew(envir(), new DynamicH264Stream(envir())); } 
virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* /*inputSource*/) { return H264VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic); } - - Boolean fLowResMode; }; class DynamicRTSPServer: public RTSPServerSupportingHTTPStreaming @@ -164,13 +156,8 @@ protected: protected: // redefined virtual functions virtual ServerMediaSession* lookupServerMediaSession(char const* streamName, Boolean isFirstLookupInSession) { - bool lowResMode = false; - if (strcmp(streamName, stream_name) == 0) { LOG_INFO(NULL, "Requesting %s stream...", streamName); - } else if (strcmp(streamName, stream_low_res_name) == 0) { - LOG_INFO(NULL, "Requesting %s stream (low resolution mode)...", streamName); - lowResMode = true; } else { LOG_INFO(NULL, "No stream available: '%s'", streamName); return NULL; @@ -188,7 +175,7 @@ protected: // redefined virtual functions sms = ServerMediaSession::createNew(envir(), streamName, streamName, "streamed by the LIVE555 Media Server");; OutPacketBuffer::maxSize = 2000000; // allow for some possibly large H.264 frames - auto subsession = new DynamicH264VideoFileServerMediaSubsession(envir(), false, lowResMode); + auto subsession = new DynamicH264VideoFileServerMediaSubsession(envir(), false); sms->addSubsession(subsession); addServerMediaSession(sms); return sms; @@ -208,8 +195,7 @@ static bool rtsp_h264_needs_buffer(buffer_lock_t *buf_lock) pthread_mutex_lock(&rtsp_lock); for (DynamicH264Stream *stream = rtsp_streams; stream; stream = stream->pNextStream) { - if (!stream->fLowResMode) - needsBuffer = true; + needsBuffer = true; } pthread_mutex_unlock(&rtsp_lock); return needsBuffer; @@ -219,33 +205,7 @@ static void rtsp_h264_capture(buffer_lock_t *buf_lock, buffer_t *buf) { pthread_mutex_lock(&rtsp_lock); for (DynamicH264Stream *stream = rtsp_streams; stream; stream = stream->pNextStream) { - stream->receiveData(buf, false); - - if (!http_h264_lowres.buf_list) { - 
stream->receiveData(buf, true); - } - } - pthread_mutex_unlock(&rtsp_lock); -} - -static bool rtsp_h264_low_res_needs_buffer(buffer_lock_t *buf_lock) -{ - bool needsBuffer = false; - - pthread_mutex_lock(&rtsp_lock); - for (DynamicH264Stream *stream = rtsp_streams; stream; stream = stream->pNextStream) { - if (stream->fLowResMode) - needsBuffer = true; - } - pthread_mutex_unlock(&rtsp_lock); - return needsBuffer; -} - -static void rtsp_h264_low_res_capture(buffer_lock_t *buf_lock, buffer_t *buf) -{ - pthread_mutex_lock(&rtsp_lock); - for (DynamicH264Stream *stream = rtsp_streams; stream; stream = stream->pNextStream) { - stream->receiveData(buf, true); + stream->receiveData(buf); } pthread_mutex_unlock(&rtsp_lock); } @@ -280,10 +240,8 @@ extern "C" int rtsp_server(rtsp_options_t *options) // LOG_INFO(NULL, "The RTSP-over-HTTP is not available."); // } - buffer_lock_register_check_streaming(&http_h264, rtsp_h264_needs_buffer); - buffer_lock_register_notify_buffer(&http_h264, rtsp_h264_capture); - buffer_lock_register_check_streaming(&http_h264_lowres, rtsp_h264_low_res_needs_buffer); - buffer_lock_register_notify_buffer(&http_h264_lowres, rtsp_h264_low_res_capture); + buffer_lock_register_check_streaming(&video_lock, rtsp_h264_needs_buffer); + buffer_lock_register_notify_buffer(&video_lock, rtsp_h264_capture); pthread_create(&rtsp_thread, NULL, rtsp_server_thread, env); return 0; @@ -299,4 +257,4 @@ extern "C" int rtsp_server(rtsp_options_t *options) return 0; } -#endif // USE_RTSP \ No newline at end of file +#endif // USE_RTSP diff --git a/output/webrtc/webrtc.cc b/output/webrtc/webrtc.cc index f4df590..5bf22db 100644 --- a/output/webrtc/webrtc.cc +++ b/output/webrtc/webrtc.cc @@ -76,7 +76,7 @@ class Client { public: Client(std::shared_ptr pc_) - : pc(pc_), use_low_res(false) + : pc(pc_) { id.resize(20); for (auto & c : id) { @@ -91,18 +91,16 @@ public: free(name); } - bool wantsFrame(bool low_res) const + bool wantsFrame() const { if (!pc || !video) return false; 
if (pc->state() != rtc::PeerConnection::State::Connected) return false; - if (use_low_res != low_res) - return false; return video->wantsFrame(); } - void pushFrame(buffer_t *buf, bool low_res) + void pushFrame(buffer_t *buf) { auto self = this; @@ -110,10 +108,6 @@ public: return; } - if (use_low_res != low_res) { - return; - } - if (!had_key_frame) { if (!buf->flags.is_keyframe) { device_video_force_key(buf->buf_list->dev); @@ -136,7 +130,6 @@ public: std::mutex lock; std::condition_variable wait_for_complete; bool had_key_frame; - bool use_low_res; }; std::shared_ptr findClient(std::string id) @@ -232,9 +225,7 @@ static bool webrtc_h264_needs_buffer(buffer_lock_t *buf_lock) { std::unique_lock lk(webrtc_clients_lock); for (auto client : webrtc_clients) { - if (client->wantsFrame(false)) - return true; - if (!http_h264_lowres.buf_list && client->wantsFrame(true)) + if (client->wantsFrame()) return true; } @@ -245,31 +236,8 @@ static void webrtc_h264_capture(buffer_lock_t *buf_lock, buffer_t *buf) { std::unique_lock lk(webrtc_clients_lock); for (auto client : webrtc_clients) { - if (client->wantsFrame(false)) - client->pushFrame(buf, false); - if (!http_h264_lowres.buf_list && client->wantsFrame(true)) - client->pushFrame(buf, true); - } -} - -static bool webrtc_h264_low_res_needs_buffer(buffer_lock_t *buf_lock) -{ - std::unique_lock lk(webrtc_clients_lock); - for (auto client : webrtc_clients) { - if (client->wantsFrame(true)) - return true; - } - - return false; -} - -static void webrtc_h264_low_res_capture(buffer_lock_t *buf_lock, buffer_t *buf) -{ - std::unique_lock lk(webrtc_clients_lock); - for (auto client : webrtc_clients) { - if (client->wantsFrame(true)) { - client->pushFrame(buf, true); - } + if (client->wantsFrame()) + client->pushFrame(buf); } } @@ -279,9 +247,6 @@ static void http_webrtc_request(http_worker_t *worker, FILE *stream, const nlohm LOG_INFO(client.get(), "Stream requested."); client->video = addVideo(client->pc, 
webrtc_client_video_payload_type, rand(), "video", ""); - if (message.contains("res")) { - client->use_low_res = (message["res"] == "low"); - } try { { @@ -418,10 +383,8 @@ extern "C" void http_webrtc_offer(http_worker_t *worker, FILE *stream) extern "C" void webrtc_server() { - buffer_lock_register_check_streaming(&http_h264, webrtc_h264_needs_buffer); - buffer_lock_register_notify_buffer(&http_h264, webrtc_h264_capture); - buffer_lock_register_check_streaming(&http_h264_lowres, webrtc_h264_low_res_needs_buffer); - buffer_lock_register_notify_buffer(&http_h264_lowres, webrtc_h264_low_res_capture); + buffer_lock_register_check_streaming(&video_lock, webrtc_h264_needs_buffer); + buffer_lock_register_notify_buffer(&video_lock, webrtc_h264_capture); } #else // USE_LIBDATACHANNEL diff --git a/service/camera-streamer-arducam-16MP.service b/service/camera-streamer-arducam-16MP.service index 04294b4..50a2f62 100644 --- a/service/camera-streamer-arducam-16MP.service +++ b/service/camera-streamer-arducam-16MP.service @@ -13,10 +13,12 @@ ExecStart=/usr/local/bin/camera-streamer \ -camera-fps=15 \ ; use two memory buffers to optimise usage -camera-nbufs=2 \ - ; the high-res is 1552x1165 - -camera-high_res_factor=1.5 \ - ; the low-res is 776x582 - -camera-low_res_factor=3.0 \ + ; the snapshot is 1438x1080 + -camera-snapshot.height=1080 \ + ; the video/webrtc is 958x720 + -camera-video.height=720 \ + ; the stream is 639x480 + -camera-stream.height=480 \ ; bump brightness slightly -camera-options=brightness=0.1 \ ; disable auto-focus diff --git a/service/camera-streamer-arducam-64MP.service b/service/camera-streamer-arducam-64MP.service index 3f86797..61b0e62 100644 --- a/service/camera-streamer-arducam-64MP.service +++ b/service/camera-streamer-arducam-64MP.service @@ -15,10 +15,12 @@ ExecStart=/usr/local/bin/camera-streamer \ -camera-fps=30 \ ; use two memory buffers to optimise usage -camera-nbufs=2 \ - ; the high-res is 1552x1165 - -camera-high_res_factor=1.5 \ - ; the 
low-res is 776x582 - -camera-low_res_factor=3.0 \ + ; the snapshot is 1438x1080 + -camera-snapshot.height=1080 \ + ; the video/webrtc is 958x720 + -camera-video.height=720 \ + ; the stream is 639x480 + -camera-stream.height=480 \ ; bump brightness slightly -camera-options=brightness=0.1 \ ; disable auto-focus diff --git a/service/camera-streamer-raspi-v2-8MP.service b/service/camera-streamer-raspi-v2-8MP.service index 90e0abd..ec54c5d 100644 --- a/service/camera-streamer-raspi-v2-8MP.service +++ b/service/camera-streamer-raspi-v2-8MP.service @@ -16,10 +16,12 @@ ExecStart=/usr/local/bin/camera-streamer \ -camera-fps=30 \ ; use two memory buffers to optimise usage -camera-nbufs=2 \ - ; the high-res is 1640x1232 - -camera-high_res_factor=2 \ - ; the low-res is 820x616 - -camera-low_res_factor=4 \ + ; the snapshot is 1438x1080 + -camera-snapshot.height=1080 \ + ; the video/webrtc is 958x720 + -camera-video.height=720 \ + ; the stream is 639x480 + -camera-stream.height=480 \ ; bump brightness slightly -camera-options=brightness=0.1 \ -rtsp-port diff --git a/service/camera-streamer-raspi-v3-12MP.service b/service/camera-streamer-raspi-v3-12MP.service index 0fb15b1..6f1e126 100644 --- a/service/camera-streamer-raspi-v3-12MP.service +++ b/service/camera-streamer-raspi-v3-12MP.service @@ -16,10 +16,12 @@ ExecStart=/usr/local/bin/camera-streamer \ -camera-fps=30 \ ; use two memory buffers to optimise usage -camera-nbufs=2 \ - ; the high-res is 1920x1080 - -camera-high_res_factor=1.2 \ - ; the low-res is 1280x720 - -camera-low_res_factor=1.8 \ + ; the snapshot is 1920x1080 + -camera-snapshot.height=1080 \ + ; the video/webrtc is 1280x720 + -camera-video.height=720 \ + ; the stream is 853x480 + -camera-stream.height=480 \ -rtsp-port DynamicUser=yes diff --git a/service/camera-streamer-usb-cam.service b/service/camera-streamer-usb-cam.service index 4fb3f6d..124f38a 100644 --- a/service/camera-streamer-usb-cam.service +++ b/service/camera-streamer-usb-cam.service @@ -12,9 +12,11 
@@ ExecStart=/usr/local/bin/camera-streamer \ ; use two memory buffers to optimise usage -camera-nbufs=2 \ ; the high-res is 1920x1080 - -camera-high_res_factor=1.0 \ - ; the low-res is 960x540 - -camera-low_res_factor=2.0 \ + -camera-snapshot.height=1080 \ + ; the video/webrtc is 1280x720 + -camera-video.height=720 \ + ; the stream is 853x480 + -camera-stream.height=480 \ -rtsp-port DynamicUser=yes