/*
  Copyright 2008-2012 David Robillard <http://drobilla.net>

  Permission to use, copy, modify, and/or distribute this software for any
  purpose with or without fee is hereby granted, provided that the above
  copyright notice and this permission notice appear in all copies.

  THIS SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "lv2/lv2plug.in/ns/ext/atom/atom.h"
#include "lv2/lv2plug.in/ns/ext/event/event.h"

#include "lv2_evbuf.h"
26 struct LV2_Evbuf_Impl {
30 uint32_t atom_Sequence;
32 LV2_Event_Buffer event;
33 LV2_Atom_Sequence atom;
/** Round `size` up to the next multiple of 8 (64-bit event alignment). */
static inline uint32_t
lv2_evbuf_pad_size(uint32_t size)
{
	return (size + 7) & (~7);
}
44 lv2_evbuf_new(uint32_t capacity,
47 uint32_t atom_Sequence)
49 // FIXME: memory must be 64-bit aligned
50 LV2_Evbuf* evbuf = (LV2_Evbuf*)malloc(
51 sizeof(LV2_Evbuf) + sizeof(LV2_Atom_Sequence) + capacity);
52 evbuf->capacity = capacity;
53 evbuf->atom_Chunk = atom_Chunk;
54 evbuf->atom_Sequence = atom_Sequence;
55 lv2_evbuf_set_type(evbuf, type);
56 lv2_evbuf_reset(evbuf, true);
61 lv2_evbuf_free(LV2_Evbuf* evbuf)
67 lv2_evbuf_set_type(LV2_Evbuf* evbuf, LV2_Evbuf_Type type)
72 evbuf->buf.event.data = (uint8_t*)(evbuf + 1);
73 evbuf->buf.event.capacity = evbuf->capacity;
78 lv2_evbuf_reset(evbuf, true);
82 lv2_evbuf_reset(LV2_Evbuf* evbuf, bool input)
84 switch (evbuf->type) {
86 evbuf->buf.event.header_size = sizeof(LV2_Event_Buffer);
87 evbuf->buf.event.stamp_type = LV2_EVENT_AUDIO_STAMP;
88 evbuf->buf.event.event_count = 0;
89 evbuf->buf.event.size = 0;
93 evbuf->buf.atom.atom.size = sizeof(LV2_Atom_Sequence_Body);
94 evbuf->buf.atom.atom.type = evbuf->atom_Sequence;
96 evbuf->buf.atom.atom.size = evbuf->capacity;
97 evbuf->buf.atom.atom.type = evbuf->atom_Chunk;
103 lv2_evbuf_get_size(LV2_Evbuf* evbuf)
105 switch (evbuf->type) {
106 case LV2_EVBUF_EVENT:
107 return evbuf->buf.event.size;
109 assert(evbuf->buf.atom.atom.type != evbuf->atom_Sequence
110 || evbuf->buf.atom.atom.size >= sizeof(LV2_Atom_Sequence_Body));
111 return evbuf->buf.atom.atom.type == evbuf->atom_Sequence
112 ? evbuf->buf.atom.atom.size - sizeof(LV2_Atom_Sequence_Body)
119 lv2_evbuf_get_buffer(LV2_Evbuf* evbuf)
121 switch (evbuf->type) {
122 case LV2_EVBUF_EVENT:
123 return &evbuf->buf.event;
125 return &evbuf->buf.atom;
131 lv2_evbuf_begin(LV2_Evbuf* evbuf)
133 LV2_Evbuf_Iterator iter = { evbuf, 0 };
138 lv2_evbuf_end(LV2_Evbuf* evbuf)
140 const uint32_t size = lv2_evbuf_get_size(evbuf);
141 const LV2_Evbuf_Iterator iter = { evbuf, lv2_evbuf_pad_size(size) };
146 lv2_evbuf_is_valid(LV2_Evbuf_Iterator iter)
148 return iter.offset < lv2_evbuf_get_size(iter.evbuf);
152 lv2_evbuf_next(LV2_Evbuf_Iterator iter)
154 if (!lv2_evbuf_is_valid(iter)) {
158 LV2_Evbuf* evbuf = iter.evbuf;
159 uint32_t offset = iter.offset;
161 switch (evbuf->type) {
162 case LV2_EVBUF_EVENT:
163 size = ((LV2_Event*)(evbuf->buf.event.data + offset))->size;
164 offset += lv2_evbuf_pad_size(sizeof(LV2_Event) + size);
167 size = ((LV2_Atom_Event*)
168 ((char*)LV2_ATOM_CONTENTS(LV2_Atom_Sequence, &evbuf->buf.atom)
169 + offset))->body.size;
170 offset += lv2_evbuf_pad_size(sizeof(LV2_Atom_Event) + size);
174 LV2_Evbuf_Iterator next = { evbuf, offset };
179 lv2_evbuf_get(LV2_Evbuf_Iterator iter,
186 *frames = *subframes = *type = *size = 0;
189 if (!lv2_evbuf_is_valid(iter)) {
193 LV2_Event_Buffer* ebuf;
195 LV2_Atom_Sequence* aseq;
197 switch (iter.evbuf->type) {
198 case LV2_EVBUF_EVENT:
199 ebuf = &iter.evbuf->buf.event;
200 ev = (LV2_Event*)ebuf->data + iter.offset;
201 *frames = ev->frames;
202 *subframes = ev->subframes;
205 *data = (uint8_t*)ev + sizeof(LV2_Event);
208 aseq = (LV2_Atom_Sequence*)&iter.evbuf->buf.atom;
209 aev = (LV2_Atom_Event*)(
210 (char*)LV2_ATOM_CONTENTS(LV2_Atom_Sequence, aseq)
212 *frames = aev->time.frames;
214 *type = aev->body.type;
215 *size = aev->body.size;
216 *data = LV2_ATOM_BODY(&aev->body);
224 lv2_evbuf_write(LV2_Evbuf_Iterator* iter,
231 LV2_Event_Buffer* ebuf;
233 LV2_Atom_Sequence* aseq;
235 switch (iter->evbuf->type) {
236 case LV2_EVBUF_EVENT:
237 ebuf = &iter->evbuf->buf.event;
238 if (ebuf->capacity - ebuf->size < sizeof(LV2_Event) + size) {
242 ev = (LV2_Event*)(ebuf->data + iter->offset);
244 ev->subframes = subframes;
247 memcpy((uint8_t*)ev + sizeof(LV2_Event), data, size);
249 size = lv2_evbuf_pad_size(sizeof(LV2_Event) + size);
251 ebuf->event_count += 1;
252 iter->offset += size;
255 aseq = (LV2_Atom_Sequence*)&iter->evbuf->buf.atom;
256 if (iter->evbuf->capacity - sizeof(LV2_Atom) - aseq->atom.size
257 < sizeof(LV2_Atom_Event) + size) {
261 aev = (LV2_Atom_Event*)(
262 (char*)LV2_ATOM_CONTENTS(LV2_Atom_Sequence, aseq)
264 aev->time.frames = frames;
265 aev->body.type = type;
266 aev->body.size = size;
267 memcpy(LV2_ATOM_BODY(&aev->body), data, size);
269 size = lv2_evbuf_pad_size(sizeof(LV2_Atom_Event) + size);
270 aseq->atom.size += size;
271 iter->offset += size;