/*
  Copyright 2008-2012 David Robillard <http://drobilla.net>

  Permission to use, copy, modify, and/or distribute this software for any
  purpose with or without fee is hereby granted, provided that the above
  copyright notice and this permission notice appear in all copies.

  THIS SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "lv2/lv2plug.in/ns/ext/atom/atom.h"
#include "lv2/lv2plug.in/ns/ext/event/event.h"

#include "lv2_evbuf.h"
26 struct LV2_Evbuf_Impl {
30 uint32_t atom_Sequence;
32 LV2_Event_Buffer event;
33 LV2_Atom_Sequence atom;
/**
   Round `size` up to the next multiple of 8, the alignment required for
   events in both LV2 event and atom sequence buffers.
*/
static inline uint32_t
lv2_evbuf_pad_size(uint32_t size)
{
	return (size + 7) & (~7);
}
44 lv2_evbuf_new(uint32_t capacity,
47 uint32_t atom_Sequence)
49 // FIXME: memory must be 64-bit aligned
50 LV2_Evbuf* evbuf = (LV2_Evbuf*)malloc(
51 sizeof(LV2_Evbuf) + sizeof(LV2_Atom_Sequence) + capacity);
52 evbuf->capacity = capacity;
53 evbuf->atom_Chunk = atom_Chunk;
54 evbuf->atom_Sequence = atom_Sequence;
55 lv2_evbuf_set_type(evbuf, type);
56 lv2_evbuf_reset(evbuf, true);
61 lv2_evbuf_free(LV2_Evbuf* evbuf)
67 lv2_evbuf_set_type(LV2_Evbuf* evbuf, LV2_Evbuf_Type type)
72 evbuf->buf.event.data = (uint8_t*)(evbuf + 1);
73 evbuf->buf.event.capacity = evbuf->capacity;
78 lv2_evbuf_reset(evbuf, true);
82 lv2_evbuf_reset(LV2_Evbuf* evbuf, bool input)
84 switch (evbuf->type) {
86 evbuf->buf.event.header_size = sizeof(LV2_Event_Buffer);
87 evbuf->buf.event.stamp_type = LV2_EVENT_AUDIO_STAMP;
88 evbuf->buf.event.event_count = 0;
89 evbuf->buf.event.size = 0;
93 evbuf->buf.atom.atom.size = sizeof(LV2_Atom_Sequence_Body);
94 evbuf->buf.atom.atom.type = evbuf->atom_Sequence;
96 evbuf->buf.atom.atom.size = evbuf->capacity;
97 evbuf->buf.atom.atom.type = evbuf->atom_Chunk;
103 lv2_evbuf_get_size(LV2_Evbuf* evbuf)
105 switch (evbuf->type) {
106 case LV2_EVBUF_EVENT:
107 return evbuf->buf.event.size;
109 assert(evbuf->buf.atom.atom.type != evbuf->atom_Sequence
110 || evbuf->buf.atom.atom.size >= sizeof(LV2_Atom_Sequence_Body));
111 return evbuf->buf.atom.atom.type == evbuf->atom_Sequence
112 ? evbuf->buf.atom.atom.size - sizeof(LV2_Atom_Sequence_Body)
119 lv2_evbuf_get_capacity(LV2_Evbuf* evbuf)
121 return evbuf->capacity;
125 lv2_evbuf_get_buffer(LV2_Evbuf* evbuf)
127 switch (evbuf->type) {
128 case LV2_EVBUF_EVENT:
129 return &evbuf->buf.event;
131 return &evbuf->buf.atom;
137 lv2_evbuf_begin(LV2_Evbuf* evbuf)
139 LV2_Evbuf_Iterator iter = { evbuf, 0 };
144 lv2_evbuf_end(LV2_Evbuf* evbuf)
146 const uint32_t size = lv2_evbuf_get_size(evbuf);
147 const LV2_Evbuf_Iterator iter = { evbuf, lv2_evbuf_pad_size(size) };
152 lv2_evbuf_is_valid(LV2_Evbuf_Iterator iter)
154 return iter.offset < lv2_evbuf_get_size(iter.evbuf);
158 lv2_evbuf_next(LV2_Evbuf_Iterator iter)
160 if (!lv2_evbuf_is_valid(iter)) {
164 LV2_Evbuf* evbuf = iter.evbuf;
165 uint32_t offset = iter.offset;
167 switch (evbuf->type) {
168 case LV2_EVBUF_EVENT:
169 size = ((LV2_Event*)((uintptr_t)(evbuf->buf.event.data + offset)))->size;
170 offset += lv2_evbuf_pad_size(sizeof(LV2_Event) + size);
173 size = ((LV2_Atom_Event*)((uintptr_t)
174 ((char*)LV2_ATOM_CONTENTS(LV2_Atom_Sequence, &evbuf->buf.atom)
175 + offset)))->body.size;
176 offset += lv2_evbuf_pad_size(sizeof(LV2_Atom_Event) + size);
180 LV2_Evbuf_Iterator next = { evbuf, offset };
185 lv2_evbuf_get(LV2_Evbuf_Iterator iter,
192 *samples = *subframes = *type = *size = 0;
195 if (!lv2_evbuf_is_valid(iter)) {
199 LV2_Event_Buffer* ebuf;
201 LV2_Atom_Sequence* aseq;
203 switch (iter.evbuf->type) {
204 case LV2_EVBUF_EVENT:
205 ebuf = &iter.evbuf->buf.event;
206 ev = (LV2_Event*)((uintptr_t)((char*)ebuf->data + iter.offset));
207 *samples = ev->frames;
208 *subframes = ev->subframes;
211 *data = (uint8_t*)ev + sizeof(LV2_Event);
214 aseq = (LV2_Atom_Sequence*)&iter.evbuf->buf.atom;
215 aev = (LV2_Atom_Event*)((uintptr_t)(
216 (char*)LV2_ATOM_CONTENTS(LV2_Atom_Sequence, aseq)
218 *samples = aev->time.frames;
220 *type = aev->body.type;
221 *size = aev->body.size;
222 *data = (uint8_t*)LV2_ATOM_BODY(&aev->body);
230 lv2_evbuf_write(LV2_Evbuf_Iterator* iter,
237 LV2_Event_Buffer* ebuf;
239 LV2_Atom_Sequence* aseq;
241 switch (iter->evbuf->type) {
242 case LV2_EVBUF_EVENT:
243 ebuf = &iter->evbuf->buf.event;
244 if (ebuf->capacity - ebuf->size < sizeof(LV2_Event) + size) {
248 ev = (LV2_Event*)((uintptr_t)(ebuf->data + iter->offset));
249 ev->frames = samples;
250 ev->subframes = subframes;
253 memcpy((uint8_t*)ev + sizeof(LV2_Event), data, size);
255 size = lv2_evbuf_pad_size(sizeof(LV2_Event) + size);
257 ebuf->event_count += 1;
258 iter->offset += size;
261 aseq = (LV2_Atom_Sequence*)&iter->evbuf->buf.atom;
262 if (iter->evbuf->capacity - sizeof(LV2_Atom) - aseq->atom.size
263 < sizeof(LV2_Atom_Event) + size) {
267 aev = (LV2_Atom_Event*)((uintptr_t)(
268 (char*)LV2_ATOM_CONTENTS(LV2_Atom_Sequence, aseq)
270 aev->time.frames = samples;
271 aev->body.type = type;
272 aev->body.size = size;
273 memcpy(LV2_ATOM_BODY(&aev->body), data, size);
275 size = lv2_evbuf_pad_size(sizeof(LV2_Atom_Event) + size);
276 aseq->atom.size += size;
277 iter->offset += size;