/*
  Copyright 2008-2012 David Robillard <http://drobilla.net>

  Permission to use, copy, modify, and/or distribute this software for any
  purpose with or without fee is hereby granted, provided that the above
  copyright notice and this permission notice appear in all copies.

  THIS SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "lv2/lv2plug.in/ns/ext/atom/atom.h"
#include "lv2/lv2plug.in/ns/ext/event/event.h"

#include "lv2_evbuf.h"
25 struct LV2_Evbuf_Impl {
29 uint32_t atom_Sequence;
31 LV2_Event_Buffer event;
32 LV2_Atom_Sequence atom;
36 static inline uint32_t
37 lv2_evbuf_pad_size(uint32_t size)
39 return (size + 7) & (~7);
43 lv2_evbuf_new(uint32_t capacity,
46 uint32_t atom_Sequence)
48 // FIXME: memory must be 64-bit aligned
49 LV2_Evbuf* evbuf = (LV2_Evbuf*)malloc(
50 sizeof(LV2_Evbuf) + sizeof(LV2_Atom_Sequence) + capacity);
51 evbuf->capacity = capacity;
52 evbuf->atom_Chunk = atom_Chunk;
53 evbuf->atom_Sequence = atom_Sequence;
54 lv2_evbuf_set_type(evbuf, type);
55 lv2_evbuf_reset(evbuf, true);
60 lv2_evbuf_free(LV2_Evbuf* evbuf)
66 lv2_evbuf_set_type(LV2_Evbuf* evbuf, LV2_Evbuf_Type type)
71 evbuf->buf.event.data = (uint8_t*)(evbuf + 1);
72 evbuf->buf.event.capacity = evbuf->capacity;
77 lv2_evbuf_reset(evbuf, true);
81 lv2_evbuf_reset(LV2_Evbuf* evbuf, bool input)
83 switch (evbuf->type) {
85 evbuf->buf.event.header_size = sizeof(LV2_Event_Buffer);
86 evbuf->buf.event.stamp_type = LV2_EVENT_AUDIO_STAMP;
87 evbuf->buf.event.event_count = 0;
88 evbuf->buf.event.size = 0;
92 evbuf->buf.atom.atom.size = 0;
93 evbuf->buf.atom.atom.type = evbuf->atom_Sequence;
95 evbuf->buf.atom.atom.size = evbuf->capacity;
96 evbuf->buf.atom.atom.type = evbuf->atom_Chunk;
102 lv2_evbuf_get_size(LV2_Evbuf* evbuf)
104 switch (evbuf->type) {
105 case LV2_EVBUF_EVENT:
106 return evbuf->buf.event.size;
108 return evbuf->buf.atom.atom.type == evbuf->atom_Sequence
109 ? evbuf->buf.atom.atom.size
116 lv2_evbuf_get_buffer(LV2_Evbuf* evbuf)
118 switch (evbuf->type) {
119 case LV2_EVBUF_EVENT:
120 return &evbuf->buf.event;
122 return &evbuf->buf.atom;
128 lv2_evbuf_begin(LV2_Evbuf* evbuf)
130 LV2_Evbuf_Iterator iter = { evbuf, 0 };
135 lv2_evbuf_end(LV2_Evbuf* evbuf)
137 const size_t size = lv2_evbuf_get_size(evbuf);
138 const LV2_Evbuf_Iterator iter = { evbuf, lv2_evbuf_pad_size(size) };
143 lv2_evbuf_is_valid(LV2_Evbuf_Iterator iter)
145 return iter.offset < lv2_evbuf_get_size(iter.evbuf);
149 lv2_evbuf_next(LV2_Evbuf_Iterator iter)
151 if (!lv2_evbuf_is_valid(iter)) {
155 LV2_Evbuf* evbuf = iter.evbuf;
156 uint32_t offset = iter.offset;
158 switch (evbuf->type) {
159 case LV2_EVBUF_EVENT:
160 size = ((LV2_Event*)(evbuf->buf.event.data + offset))->size;
161 offset += lv2_evbuf_pad_size(sizeof(LV2_Event) + size);
164 size = ((LV2_Atom_Event*)
165 ((char*)LV2_ATOM_CONTENTS(LV2_Atom_Sequence, &evbuf->buf.atom)
166 + offset))->body.size;
167 offset += lv2_evbuf_pad_size(sizeof(LV2_Atom_Event) + size);
171 LV2_Evbuf_Iterator next = { evbuf, offset };
176 lv2_evbuf_get(LV2_Evbuf_Iterator iter,
183 *frames = *subframes = *type = *size = 0;
186 if (!lv2_evbuf_is_valid(iter)) {
190 LV2_Event_Buffer* ebuf;
192 LV2_Atom_Sequence* aseq;
194 switch (iter.evbuf->type) {
195 case LV2_EVBUF_EVENT:
196 ebuf = &iter.evbuf->buf.event;
197 ev = (LV2_Event*)ebuf->data + iter.offset;
198 *frames = ev->frames;
199 *subframes = ev->subframes;
202 *data = (uint8_t*)ev + sizeof(LV2_Event);
205 aseq = (LV2_Atom_Sequence*)&iter.evbuf->buf.atom;
206 aev = (LV2_Atom_Event*)(
207 (char*)LV2_ATOM_CONTENTS(LV2_Atom_Sequence, aseq)
209 *frames = aev->time.frames;
211 *type = aev->body.type;
212 *size = aev->body.size;
213 *data = LV2_ATOM_BODY(&aev->body);
221 lv2_evbuf_write(LV2_Evbuf_Iterator* iter,
228 LV2_Event_Buffer* ebuf;
230 LV2_Atom_Sequence* aseq;
232 switch (iter->evbuf->type) {
233 case LV2_EVBUF_EVENT:
234 ebuf = &iter->evbuf->buf.event;
235 if (ebuf->capacity - ebuf->size < sizeof(LV2_Event) + size) {
239 ev = (LV2_Event*)(ebuf->data + iter->offset);
241 ev->subframes = subframes;
244 memcpy((uint8_t*)ev + sizeof(LV2_Event), data, size);
246 size = lv2_evbuf_pad_size(sizeof(LV2_Event) + size);
248 ebuf->event_count += 1;
249 iter->offset += size;
252 aseq = (LV2_Atom_Sequence*)&iter->evbuf->buf.atom;
253 if (iter->evbuf->capacity - sizeof(LV2_Atom) - aseq->atom.size
254 < sizeof(LV2_Atom_Event) + size) {
258 aev = (LV2_Atom_Event*)(
259 (char*)LV2_ATOM_CONTENTS(LV2_Atom_Sequence, aseq)
261 aev->time.frames = frames;
262 aev->body.type = type;
263 aev->body.size = size;
264 memcpy(LV2_ATOM_BODY(&aev->body), data, size);
266 size = lv2_evbuf_pad_size(sizeof(LV2_Atom_Event) + size);
267 aseq->atom.size += size;
268 iter->offset += size;