[previous] [index] [next]
In this tutorial we show how to use a stream to capture video frames.
Even though we are now working with a different media type and we are capturing instead of playing back, you will see that this example is very similar to the previous tutorial 4.
Let's take a look at the code before we break it down:
#include <spa/param/video/format-utils.h>
#include <spa/debug/types.h>
#include <spa/param/video/type-info.h>
struct spa_video_info format;
};
static void on_process(void *userdata)
{
struct spa_buffer *buf;
return;
}
if (buf->datas[0].data == NULL)
return;
printf("got a frame of size %d\n", buf->datas[0].chunk->size);
}
static void on_param_changed(
void *userdata, uint32_t
id,
const struct spa_pod *
param)
{
if (
param == NULL ||
id != SPA_PARAM_Format)
return;
if (spa_format_parse(
param,
&
data->format.media_type,
&
data->format.media_subtype) < 0)
return;
if (
data->format.media_type != SPA_MEDIA_TYPE_video ||
data->format.media_subtype != SPA_MEDIA_SUBTYPE_raw)
return;
if (spa_format_video_raw_parse(
param, &
data->format.info.raw) < 0)
return;
printf("got video format:\n");
printf(
" format: %d (%s)\n",
data->format.info.raw.format,
spa_debug_type_find_name(spa_type_video_format,
data->format.info.raw.format));
printf(
" size: %dx%d\n",
data->format.info.raw.size.width,
data->format.info.raw.size.height);
printf(
" framerate: %d/%d\n",
data->format.info.raw.framerate.num,
data->format.info.raw.framerate.denom);
}
.param_changed = on_param_changed,
.process = on_process,
};
int main(int argc, char *argv[])
{
const struct spa_pod *params[1];
struct spa_pod_builder b = SPA_POD_BUILDER_INIT(
buffer,
sizeof(
buffer));
"video-capture",
NULL),
&stream_events,
params[0] = spa_pod_builder_add_object(&b,
SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat,
SPA_FORMAT_mediaType, SPA_POD_Id(SPA_MEDIA_TYPE_video),
SPA_FORMAT_mediaSubtype, SPA_POD_Id(SPA_MEDIA_SUBTYPE_raw),
SPA_FORMAT_VIDEO_format, SPA_POD_CHOICE_ENUM_Id(7,
SPA_VIDEO_FORMAT_RGB,
SPA_VIDEO_FORMAT_RGB,
SPA_VIDEO_FORMAT_RGBA,
SPA_VIDEO_FORMAT_RGBx,
SPA_VIDEO_FORMAT_BGRx,
SPA_VIDEO_FORMAT_YUY2,
SPA_VIDEO_FORMAT_I420),
SPA_FORMAT_VIDEO_size, SPA_POD_CHOICE_RANGE_Rectangle(
&SPA_RECTANGLE(320, 240),
&SPA_RECTANGLE(1, 1),
&SPA_RECTANGLE(4096, 4096)),
SPA_FORMAT_VIDEO_framerate, SPA_POD_CHOICE_RANGE_Fraction(
&SPA_FRACTION(25, 1),
&SPA_FRACTION(0, 1),
&SPA_FRACTION(1000, 1)));
argc > 1 ? (uint32_t)atoi(argv[1]) :
PW_ID_ANY,
params, 1);
return 0;
}
#define pw_log_warn(...)
Log a warning message.
PipeWire main-loop interface.
SPA_EXPORT int pw_main_loop_run(struct pw_main_loop *loop)
Start a main loop.
Definition: main-loop.c:149
SPA_EXPORT struct pw_main_loop * pw_main_loop_new(const struct spa_dict *props)
Create a new main loop.
Definition: main-loop.c:87
SPA_EXPORT void pw_main_loop_destroy(struct pw_main_loop *loop)
Destroy a main loop.
Definition: main-loop.c:98
SPA_EXPORT void pw_init(int *argc, char **argv[])
Initialize PipeWire.
Definition: pipewire.c:479
PipeWire stream object class.
int pw_stream_connect(struct pw_stream *stream, enum pw_direction direction, uint32_t target_id, enum pw_stream_flags flags, const struct spa_pod **params, uint32_t n_params)
Connect a stream for input or output on port_path.
Definition: stream.c:1530
void pw_stream_destroy(struct pw_stream *stream)
Destroy a stream.
Definition: stream.c:1361
#define PW_ID_ANY
Definition: core.h:52
#define PW_KEY_MEDIA_TYPE
Media.
Definition: keys.h:260
#define PW_KEY_MEDIA_ROLE
Role: Movie, Music, Camera, Screen, Communication, Game, Notification, DSP, Production,...
Definition: keys.h:264
#define PW_KEY_MEDIA_CATEGORY
Media Category: Playback, Capture, Duplex, Monitor, Manager.
Definition: keys.h:262
struct pw_loop * pw_main_loop_get_loop(struct pw_main_loop *loop)
Get the loop implementation.
Definition: main-loop.c:121
#define PW_DIRECTION_INPUT
Definition: port.h:48
@ PW_STREAM_FLAG_MAP_BUFFERS
mmap the buffers except DmaBuf
Definition: stream.h:244
@ PW_STREAM_FLAG_AUTOCONNECT
try to automatically connect this stream
Definition: stream.h:239
struct pw_stream * pw_stream_new_simple(struct pw_loop *loop, const char *name, struct pw_properties *props, const struct pw_stream_events *events, void *data)
Definition: stream.c:1298
struct pw_buffer * pw_stream_dequeue_buffer(struct pw_stream *stream)
Get a buffer that can be filled for playback streams or consumed for capture streams.
Definition: stream.c:1950
int pw_stream_queue_buffer(struct pw_stream *stream, struct pw_buffer *buffer)
Submit a buffer for playback or recycle a buffer for capture.
Definition: stream.c:1977
#define PW_VERSION_STREAM_EVENTS
Definition: stream.h:201
struct spa_buffer * buffer
the spa buffer
Definition: stream.h:165
SPA_EXPORT struct pw_properties * pw_properties_new(const char *key,...)
Make a new properties object.
Definition: properties.c:100
Events for a stream.
Definition: stream.h:200
Save as tutorial5.c and compile with:
gcc -Wall tutorial5.c -o tutorial5 -lm $(pkg-config --cflags --libs libpipewire-0.3)
Most of the application is structured like the previous tutorial 4.
We create a stream object with different properties to make it a Camera Video Capture stream.
"video-capture",
NULL),
&stream_events,
In addition to the process
event, we are also going to listen to a new event, param_changed
:
.param_changed = on_param_changed,
.process = on_process,
};
Because we capture a stream of a wide range of different video formats and resolutions, we have to describe our accepted formats in a different way:
const struct spa_pod *params[1];
struct spa_pod_builder b = SPA_POD_BUILDER_INIT(
buffer,
sizeof(
buffer));
params[0] = spa_pod_builder_add_object(&b,
SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat,
SPA_FORMAT_mediaType, SPA_POD_Id(SPA_MEDIA_TYPE_video),
SPA_FORMAT_mediaSubtype, SPA_POD_Id(SPA_MEDIA_SUBTYPE_raw),
SPA_FORMAT_VIDEO_format, SPA_POD_CHOICE_ENUM_Id(7,
SPA_VIDEO_FORMAT_RGB,
SPA_VIDEO_FORMAT_RGB,
SPA_VIDEO_FORMAT_RGBA,
SPA_VIDEO_FORMAT_RGBx,
SPA_VIDEO_FORMAT_BGRx,
SPA_VIDEO_FORMAT_YUY2,
SPA_VIDEO_FORMAT_I420),
SPA_FORMAT_VIDEO_size, SPA_POD_CHOICE_RANGE_Rectangle(
&SPA_RECTANGLE(320, 240),
&SPA_RECTANGLE(1, 1),
&SPA_RECTANGLE(4096, 4096)),
SPA_FORMAT_VIDEO_framerate, SPA_POD_CHOICE_RANGE_Fraction(
&SPA_FRACTION(25, 1),
&SPA_FRACTION(0, 1),
&SPA_FRACTION(1000, 1)));
This is using a struct spa_pod_builder
to make a struct spa_pod *
object in the buffer array on the stack. The parameter is of type SPA_PARAM_EnumFormat
which means that it enumerates the possible formats for this stream.
In this example we use the builder to create some CHOICE
entries for the format properties.
We have an enumeration of formats: we need to first give the number of values that follow, then the default (preferred) value, followed by the alternatives in order of preference:
SPA_FORMAT_VIDEO_format, SPA_POD_CHOICE_ENUM_Id(7,
SPA_VIDEO_FORMAT_RGB,
SPA_VIDEO_FORMAT_RGB,
SPA_VIDEO_FORMAT_RGBA,
SPA_VIDEO_FORMAT_RGBx,
SPA_VIDEO_FORMAT_BGRx,
SPA_VIDEO_FORMAT_YUY2,
SPA_VIDEO_FORMAT_I420),
We also have a RANGE
of values for the size. We need to give a default (preferred) size and then a min and max value:
SPA_FORMAT_VIDEO_size, SPA_POD_CHOICE_RANGE_Rectangle(
&SPA_RECTANGLE(320, 240),
&SPA_RECTANGLE(1, 1),
&SPA_RECTANGLE(4096, 4096)),
We have something similar for the framerate.
Note that there are other video parameters that we don't specify here. This means that we don't have any restrictions for their values.
See SPA POD for more information about how to make these POD objects.
Now we're ready to connect the stream and run the main loop:
argc > 1 ? (uint32_t)atoi(argv[1]) :
PW_ID_ANY,
params, 1);
To connect we specify that we have a PW_DIRECTION_INPUT
stream. PW_ID_ANY
means that we are ok with connecting to any producer. We also allow the user to pass an optional target id.
We're setting the PW_STREAM_FLAG_AUTOCONNECT
flag to make an automatic connection to a suitable camera and PW_STREAM_FLAG_MAP_BUFFERS
to let the stream mmap the data for us.
And last we pass the extra parameters for our stream. Here we only have the allowed formats (SPA_PARAM_EnumFormat
).
Running the mainloop will start the connection and negotiation process. First our param_changed
event will be called with the format that was negotiated between our stream and the camera. This is always something that is compatible with what we enumerated in the EnumFormat param when we connected.
Let's take a look at how we can parse the format in the param_changed
event:
static void on_param_changed(
void *userdata, uint32_t
id,
const struct spa_pod *
param)
{
if (
param == NULL ||
id != SPA_PARAM_Format)
return;
First check if there is a param. A NULL param means that it is cleared. The id of the param tells you what param it is. We are only interested in the Format param (SPA_PARAM_Format
).
We can parse the media type and subtype as below and ensure that it is of the right type. In our example this will always be true but when your EnumFormat contains different media types or subtypes, this is how you can parse them:
if (spa_format_parse(
param,
&
data->format.media_type,
&
data->format.media_subtype) < 0)
return;
if (
data->format.media_type != SPA_MEDIA_TYPE_video ||
data->format.media_subtype != SPA_MEDIA_SUBTYPE_raw)
return;
For the video/raw
media type/subtype there is a utility function to parse out the values into a struct spa_video_info
. This makes it easier to deal with.
if (spa_format_video_raw_parse(
param, &
data->format.info.raw) < 0)
return;
printf("got video format:\n");
printf(
" format: %d (%s)\n",
data->format.info.raw.format,
spa_debug_type_find_name(spa_type_video_format,
data->format.info.raw.format));
printf(
" size: %dx%d\n",
data->format.info.raw.size.width,
data->format.info.raw.size.height);
printf(
" framerate: %d/%d\n",
data->format.info.raw.framerate.num,
data->format.info.raw.framerate.denom);
}
In this example we dump the video size and parameters but in a real playback or capture application you might want to set up the screen or encoder to deal with the format.
After negotiation, the process function is called for each new frame. Check out tutorial 4 for another example.
static void on_process(void *userdata)
{
struct spa_buffer *buf;
return;
}
if (buf->datas[0].data == NULL)
return;
printf("got a frame of size %d\n", buf->datas[0].chunk->size);
}
In a real capture application, one would do something with the data, like copy it to the screen or encode it into a file.
[previous] [index] [next]