ocarina/core/audio.c
/*
 * Copyright 2013 (c) Anna Schumaker.
 */
#include <core/audio.h>
#include <core/idle.h>
#include <core/playlist.h>
#include <core/settings.h>
#define LOAD_PLAYING (1 << 0) /* Begin playback after loading */
#define LOAD_HISTORY (1 << 1) /* Add the track to the history */
#define LOAD_DEFAULT (LOAD_PLAYING | LOAD_HISTORY)
static const char *SETTINGS_TRACK = "core.audio.cur";
static const char *SETTINGS_VOLUME = "core.audio.volume";
static struct file audio_file = FILE_INIT_DATA("", "cur_track", 0);
static struct track *audio_track = NULL;
static int audio_pause_count = -1;
static GstElement *audio_pipeline = NULL;
static GstElement *audio_source = NULL;
static GstElement *audio_decoder = NULL;
static GstElement *audio_converter = NULL;
static GstElement *audio_volume = NULL;
static GstElement *audio_sink = NULL;
static guint audio_bus_id = 0;
static struct audio_callbacks *audio_cb = NULL;
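
/* Change the pipeline state, returning false if it is already in @state. */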
static bool __audio_change_state(GstState state)
{
        if (audio_cur_state() == state)
                return false;
        return gst_element_set_state(audio_pipeline, state)
                        != GST_STATE_CHANGE_FAILURE;
}
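
/*
 * Load @track into the pipeline and start it PLAYING or PAUSED depending on
 * @flags.  The previous track is marked played (and freed if it was external),
 * and the new track is added to the History playlist when LOAD_HISTORY is set.
 */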
static struct track *__audio_load(struct track *track, unsigned int flags)
{
        struct track *prev = audio_track;
        gchar *path;

        if (!track)
                return NULL;

        audio_track = track;
        path = track_path(track);

        if (audio_cur_state() != GST_STATE_NULL)
                gst_element_set_state(audio_pipeline, GST_STATE_READY);
        g_object_set(G_OBJECT(audio_source), "location", path, NULL);
        gst_element_set_state(audio_pipeline, flags & LOAD_PLAYING ?
                              GST_STATE_PLAYING : GST_STATE_PAUSED);

        playlist_played(prev);
        if (prev && TRACK_IS_EXTERNAL(prev))
                track_free_external(prev);

        playlist_selected(track);
        if (flags & LOAD_HISTORY && !TRACK_IS_EXTERNAL(track))
                playlist_add(playlist_lookup(PL_SYSTEM, "History"), track);

        if (audio_cb)
                audio_cb->audio_cb_load(track);

        audio_save();
        g_free(path);
        return track;
}
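
/* A new decodebin pad is available: link the decoder into the converter. */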
static void __audio_pad_added(GstElement *element, GstPad *pad, gpointer data)
{
        GstPad *sink = gst_element_get_static_pad(audio_decoder, "sink");

        gst_element_link(element, audio_converter);
        gst_pad_link(pad, sink);
        gst_object_unref(sink);
}
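
/*
 * Bus watch: errors and end-of-stream advance playback to the next track,
 * and completed pipeline state changes are reported through the callbacks.
 */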
static gboolean __audio_message(GstBus *bus, GstMessage *message, gpointer data)
{
        GstObject *source = GST_OBJECT(GST_MESSAGE_SRC(message));
        gchar *debug = NULL;
        GError *error = NULL;
        GstState old, state, next;
        unsigned int load_flags = LOAD_DEFAULT;

        switch (GST_MESSAGE_TYPE(message)) {
        case GST_MESSAGE_ERROR:
                gst_message_parse_error(message, &error, &debug);
                g_printerr("ERROR from element %s: %s\n",
                           GST_OBJECT_NAME(source), error->message);
                g_printerr("DEBUG details: %s\n", debug ? debug : "none");
                g_error_free(error);
                g_free(debug);

                if (audio_cur_state() != GST_STATE_PLAYING)
                        load_flags = LOAD_HISTORY;
                __audio_load(playlist_next(), load_flags);
                break;
        case GST_MESSAGE_EOS:
                track_played(audio_track);
                if (audio_pause_count >= 0) {
                        audio_pause_after(audio_pause_count - 1);
                        if (audio_pause_count == -1)
                                load_flags = LOAD_HISTORY;
                }
                __audio_load(playlist_next(), load_flags);
                break;
        case GST_MESSAGE_STATE_CHANGED:
                if (!audio_cb || source != GST_OBJECT(audio_pipeline))
                        break;
                gst_message_parse_state_changed(message, &old, &state, &next);
                if (state == GST_STATE_PLAYING || state == GST_STATE_PAUSED) {
                        if (next == GST_STATE_VOID_PENDING)
                                audio_cb->audio_cb_state_change(state);
                }
                break;
        default:
                break;
        }
        return true;
}
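
/*
 * Idle task run at startup to restore the current track from settings, or
 * from the cur_track data file (which is removed after it is read).
 */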
static bool __audio_init_idle(void *data)
{
        unsigned int track;

        if (settings_has(SETTINGS_TRACK)) {
                track = settings_get(SETTINGS_TRACK);
                __audio_load(track_get(track), LOAD_HISTORY);
        } else if (file_open(&audio_file, OPEN_READ)) {
                track = file_readu(&audio_file);
                file_close(&audio_file);
                file_remove(&audio_file);
                __audio_load(track_get(track), LOAD_HISTORY);
        }
        return true;
}
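
/*
 * Build the playback pipeline:
 *   filesrc -> decodebin -> audioconvert -> volume -> autoaudiosink
 */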
void audio_init(int *argc, char ***argv, struct audio_callbacks *callbacks)
{
        unsigned int volume = 100;
        GstBus *bus;

        gst_init(argc, argv);
        audio_cb = callbacks;
        audio_pipeline = gst_pipeline_new("pipeline");
        audio_source = gst_element_factory_make("filesrc", "source");
        audio_decoder = gst_element_factory_make("decodebin", "decoder");
        audio_converter = gst_element_factory_make("audioconvert", "converter");
        audio_volume = gst_element_factory_make("volume", "volume");
        audio_sink = gst_element_factory_make("autoaudiosink", "sink");

        bus = gst_pipeline_get_bus(GST_PIPELINE(audio_pipeline));
        audio_bus_id = gst_bus_add_watch(bus, __audio_message, NULL);

        gst_bin_add_many(GST_BIN(audio_pipeline), audio_source, audio_decoder,
                         audio_converter, audio_volume, audio_sink, NULL);
        gst_element_link(audio_source, audio_decoder);
        gst_element_link_many(audio_converter, audio_volume, audio_sink, NULL);
        g_signal_connect(audio_decoder, "pad-added",
                         G_CALLBACK(__audio_pad_added), NULL);
        gst_object_unref(bus);

        if (settings_has(SETTINGS_VOLUME))
                volume = settings_get(SETTINGS_VOLUME);
        audio_set_volume(volume);

        idle_schedule(IDLE_SYNC, __audio_init_idle, NULL);
}
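
/* Stop playback, tear down the pipeline, and remove the bus watch. */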
void audio_deinit()
{
        gst_element_set_state(audio_pipeline, GST_STATE_NULL);
        gst_object_unref(GST_ELEMENT(audio_pipeline));
        g_source_remove(audio_bus_id);

        audio_pipeline = NULL;
        audio_source = NULL;
        audio_decoder = NULL;
        audio_converter = NULL;
        audio_volume = NULL;
        audio_sink = NULL;
        audio_track = NULL;

        gst_deinit();
}
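
/* Remember the current track (library tracks only) so it survives a restart. */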
void audio_save()
{
        if (audio_track && !TRACK_IS_EXTERNAL(audio_track))
                settings_set(SETTINGS_TRACK, track_index(audio_track));
}
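
/* Load @track, unless it is already the current track. */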
bool audio_load(struct track *track)
{
        if (track == audio_track)
                return false;
        return __audio_load(track, LOAD_DEFAULT) != NULL;
}

bool audio_load_filepath(const gchar *filepath)
{
        struct track *track;

        if (!filepath)
                return false;

        track = track_lookup(filepath);
        if (!track)
                track = track_alloc_external(filepath);
        return audio_load(track);
}

struct track *audio_cur_track()
{
        return audio_track;
}

GstState audio_cur_state()
{
        GstState cur = GST_STATE_NULL;

        if (audio_pipeline)
                gst_element_get_state(audio_pipeline, &cur, NULL,
                                      GST_CLOCK_TIME_NONE);
        return cur;
}
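
/* Clamp @volume to [0, 100], save it, and apply it to the volume element. */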
void audio_set_volume(unsigned int volume)
{
        gdouble vol;

        if (volume > 100)
                volume = 100;
        vol = (gdouble)volume / 100;

        settings_set(SETTINGS_VOLUME, volume);
        g_object_set(G_OBJECT(audio_volume), "volume", vol, NULL);
}

unsigned int audio_get_volume()
{
        gdouble volume;

        g_object_get(G_OBJECT(audio_volume), "volume", &volume, NULL);
        return volume * 100;
}

bool audio_play()
{
        if (!audio_track)
                return false;
        return __audio_change_state(GST_STATE_PLAYING);
}

bool audio_pause()
{
        if (!audio_track)
                return false;
        return __audio_change_state(GST_STATE_PAUSED);
}
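
/* Seek to @offset (nanoseconds) within the current track. */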
bool audio_seek(gint64 offset)
{
        if (!audio_track)
                return false;
        return gst_element_seek_simple(audio_pipeline, GST_FORMAT_TIME,
                                       GST_SEEK_FLAG_FLUSH, offset);
}

gint64 audio_position()
{
        gint64 position;

        if (gst_element_query_position(audio_pipeline, GST_FORMAT_TIME,
                                       &position))
                return position;
        return 0;
}
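
/* Duration in nanoseconds, falling back to the track's tagged length. */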
gint64 audio_duration()
{
        gint64 duration;

        if (gst_element_query_duration(audio_pipeline, GST_FORMAT_TIME,
                                       &duration))
                return duration;
        if (audio_track)
                return audio_track->tr_length * GST_SECOND;
        return 0;
}

struct track *audio_next()
{
        return __audio_load(playlist_next(), LOAD_DEFAULT);
}

struct track *audio_prev()
{
        return __audio_load(playlist_prev(), LOAD_PLAYING);
}
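
/* Request that playback pause after @n more tracks (-1 cancels the request). */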
void audio_pause_after(int n)
{
        if (n != audio_pause_count) {
                audio_pause_count = n;
                if (audio_cb)
                        audio_cb->audio_cb_config_pause(audio_pause_count);
        }
}
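
/* Helpers for the test suite to inject bus messages and access the pipeline. */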
#ifdef CONFIG_TESTING
void test_audio_eos()
{
        GstMessage *message = gst_message_new_eos(GST_OBJECT(audio_pipeline));

        __audio_message(NULL, message, NULL);
        gst_message_unref(message);
}

void test_audio_error(GError *error, gchar *debug)
{
        GstMessage *message = gst_message_new_error(GST_OBJECT(audio_pipeline),
                                                    error, debug);

        __audio_message(NULL, message, NULL);
        gst_message_unref(message);
}

GstElement *test_audio_pipeline()
{
        return audio_pipeline;
}
#endif /* CONFIG_TESTING */