Mstdlib-1.24.0
m_io_layer.h
1/* The MIT License (MIT)
2 *
3 * Copyright (c) 2017 Monetra Technologies, LLC.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a copy
6 * of this software and associated documentation files (the "Software"), to deal
7 * in the Software without restriction, including without limitation the rights
8 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 * copies of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 * THE SOFTWARE.
22 */
23
24#ifndef __M_IO_LAYER_H__
25#define __M_IO_LAYER_H__
26
27#include <mstdlib/mstdlib_io.h>
28
29__BEGIN_DECLS
30
31/*! \addtogroup m_io_layer Functions for creating and using custom I/O layers
32 * \ingroup m_eventio_semipublic
33 *
34 * Included using the semi-public header of <mstdlib/io/m_io_layer.h>
35 *
36 * This is a semi-public header meant for those writing their own io layers.
37 * This could be a low level layer that's not currently supported for comms. More
38 * commonly it would be intermediate layers to accommodate specific data handling.
39 *
40 * # Layer Design
41 *
42 * Layers are stacked with the application being on top and the comms layer on
43 * the bottom (typically the comms layer is the bottom layer). Layers in between are
44 * typically data processing layers. For example, Application, TLS, and network.
45 * Where the TLS layer is an intermediate data processing layer.
46 *
47 * Intermediate layers are bidirectional with data flowing down and up.
48 *
49 * ## Processing Events Callback
50 *
51 * The `processevent_cb` set by `M_io_callbacks_reg_processevent()` flows upward. From
52 * the bottom comms layer through the intermediate layer and then to the application layer.
53 * This is where data manipulation coming in can be handled. The callback can either allow
54 * the event that triggered it to continue up the layer stack or it can suppress the event so
55 * no further processing takes place.
56 *
57 * For example, if the intermediate layer doesn't need to do any processing of the data or has completed
58 * all processing it will allow the event to propagate up. If the layer needs more data before it
59 * can be used by the next layer it will suppress the event so processing the event stops.
60 *
61 * A read event from `processevent_cb` needs to read the data from the layer under in order to
62 * get the data flowing up to process. A write event needs to write any pending data to the layer under
63 * in order for it to be sent out. Read flows up and write flows down.
64 *
65 * Events always come from the bottom up. Either the lower layer(s) are stating there is data to read,
66 * or it is stating data can be written. If there is no processing of read data or no data to write
67 * the events would be allowed to propagate upwards so other layers (or the application) can handle the event.
68 *
69 * For processing read events, from the `processevent_cb` it is necessary to use
70 * `M_io_layer_read()` like so `M_io_layer_read(io, M_io_layer_get_index(layer) - 1, buf, &buf_len, meta);`.
71 * Since data is flowing up, the layer under a given layer has the pending read data that needs to be
72 * processed.
73 *
74 * For processing write events, from the `processevent_cb` it is necessary to use
75 * `M_io_layer_write()` like so `M_io_layer_write(io, M_io_layer_get_index(layer) - 1, NULL, NULL, meta);`.
76 * Since data is flowing down, the layer under a given layer needs to write the pending data.
77 *
78 * An application would use `M_io_read()` and `M_io_write()`. These always flow from the top layer down.
79 * Since this layer is in the middle we need to always work with the layer beneath.
80 *
81 * ## Read / Write Callbacks
82 *
83 * The `read_cb` and `write_cb` set by `M_io_callbacks_reg_read()` and `M_io_callbacks_reg_write()`
84 * flow down.
85 *
86 * A layer above will call `M_io_layer_read` or if the top most layer the application would
87 * have called `M_io_read`. These call the layer's `read_cb`. If there is no read callback registered the
88 * layer is skipped and the next layer in the sequence is called. This happens internally.
89 * The `read_cb` will return any buffered data that has been read and passes it upward.
90 * The data is typically buffered via the read event from `processevent_cb`.
91 *
92 * A layer above will call `M_io_layer_write` or if the top most layer the application would
93 * have called `M_io_write`. These call the layer's `write_cb`. If there is no write callback registered the
94 * layer is skipped and the next layer in the sequence is called. This happens internally.
95 * The `write_cb` will receive any data that needs to be passed down for writing. Typically, the
96 * `write_cb` will attempt to write the data immediately (after handling any processing) but may
97 * need to buffer the data and write more later when the `processevent_cb` receives a write event
98 * stating layers below can accept data to write.
99 *
100 * ## Examples
101 *
102 * Example layers:
103 * - Basic layer that marshals data. Useful for starting a new layer.
104 * - Processing STX+ETX+LRC with ACK/NAK and resending message
105 * - BLE helper layer that handles secure pairing (if necessary) and setting read/write characteristic endpoints.
106 *
107 * ### Basic
108 *
109 * \code{.c}
110 * #include <mstdlib/io/m_io_layer.h>
111 *
112 * // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
113 *
114 * struct M_io_handle {
115 * M_buf_t *read_buf;
116 * M_buf_t *write_buf;
117 * char err[256];
118 * };
119 *
120 * // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
121 *
122 * // Writes as much data as possible from the given buffer to the layer below this one.
123 * static M_io_error_t write_to_next_layer(M_io_layer_t *layer, M_buf_t *buf)
124 * {
125 * M_io_t *io;
126 * M_io_error_t err;
127 * size_t layer_idx;
128 * size_t write_len;
129 *
130 * if (layer == NULL || M_buf_len(buf) == 0)
131 * return M_IO_ERROR_INVALID;
132 *
133 * io = M_io_layer_get_io(layer);
134 * layer_idx = M_io_layer_get_index(layer);
135 *
136 * if (io == NULL || layer_idx == 0)
137 * return M_IO_ERROR_INVALID;
138 *
139 * err = M_IO_ERROR_SUCCESS;
140 * do {
141 * write_len = M_buf_len(buf);
142 * err = M_io_layer_write(io, layer_idx - 1, (const M_uint8 *)M_buf_peek(buf), &write_len, NULL);
143 * if (err == M_IO_ERROR_SUCCESS) {
144 * M_buf_drop(buf, write_len);
145 * }
146 * } while (err == M_IO_ERROR_SUCCESS && M_buf_len(buf) > 0);
147 *
148 * return err;
149 * }
150 *
151 * // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
152 *
153 * static M_bool processevent_cb(M_io_layer_t *layer, M_event_type_t *type)
154 * {
155 * M_io_handle_t *handle = M_io_layer_get_handle(layer);
156 * M_io_t *io = M_io_layer_get_io(layer);
157 * M_bool consume = M_FALSE; // Default to passing onto the next layer.
158 * M_io_error_t err;
159 *
160 * if (handle == NULL || io == NULL)
161 * return M_FALSE;
162 *
163 * switch (*type) {
164 * case M_EVENT_TYPE_CONNECTED:
165 * break;
166 * case M_EVENT_TYPE_WRITE:
167 * if (M_buf_len(handle->write_buf) != 0) {
168 * err = write_to_next_layer(layer, handle->write_buf);
169 * if (M_io_error_is_critical(err)) {
170 * M_snprintf(handle->err, sizeof(handle->err), "Error writing data: %s", M_io_error_string(err));
171 * *type = M_EVENT_TYPE_ERROR;
172 * } else if (M_buf_len(handle->write_buf) != 0) {
173 * // Don't inform higher levels we can write if we have more data pending.
174 * consume = M_TRUE;
175 * }
176 * }
177 * break;
178 * case M_EVENT_TYPE_READ:
179 * // We're getting data from the device. Let's pull out the data and check if we want it or not.
180 * do {
181 * M_uint8 buf[256] = { 0 };
182 * size_t buf_len = sizeof(buf);
183 *
184 * err = M_io_layer_read(io, M_io_layer_get_index(layer) - 1, buf, &buf_len, NULL);
185 * if (err == M_IO_ERROR_SUCCESS) {
186 * // Save the data into the handler's buffer so we
187 * // can pass it up to the layer above in the read callback.
188 * M_buf_add_bytes(handle->read_buf, buf, buf_len);
189 * } else if (M_io_error_is_critical(err)) {
190 * M_snprintf(handle->err, sizeof(handle->err), "Error reading data: %s", M_io_error_string(err));
191 * *type = M_EVENT_TYPE_ERROR;
192 * break;
193 * }
194 * } while (err == M_IO_ERROR_SUCCESS);
195 * break;
196 * case M_EVENT_TYPE_ERROR:
197 * M_io_get_error_string(io, handle->err, sizeof(handle->err));
198 * break;
199 * case M_EVENT_TYPE_ACCEPT:
200 * case M_EVENT_TYPE_DISCONNECTED:
201 * case M_EVENT_TYPE_OTHER:
202 * break;
203 * }
204 *
205 * // M_TRUE to discard this event, or M_FALSE to pass it on to the next layer.
206 * return consume;
207 * }
208 *
209 * static M_io_error_t read_cb(M_io_layer_t *layer, unsigned char *buf, size_t *buf_len, M_io_meta_t *meta)
210 * {
211 * M_io_handle_t *handle = M_io_layer_get_handle(layer);
212 *
213 * (void)meta;
214 *
215 * if (handle == NULL || buf == NULL || buf_len == NULL || *buf_len == 0)
216 * return M_IO_ERROR_INVALID;
217 *
218 * // Check if we have any more data to pass on or not.
219 * if (M_buf_len(handle->read_buf) == 0)
220 * return M_IO_ERROR_WOULDBLOCK;
221 *
222 * // Pass on as much data as possible.
223 * *buf_len = M_MIN(*buf_len, M_buf_len(handle->read_buf));
224 * M_mem_copy(buf, M_buf_peek(handle->read_buf), *buf_len);
225 * M_buf_drop(handle->read_buf, *buf_len);
226 *
227 * return M_IO_ERROR_SUCCESS;
228 * }
229 *
230 * static M_io_error_t write_cb(M_io_layer_t *layer, const unsigned char *buf, size_t *buf_len, M_io_meta_t *meta)
231 * {
232 * M_io_handle_t *handle;
233 * M_io_error_t err;
234 *
235 * (void)meta;
236 *
237 * handle = M_io_layer_get_handle(layer);
238 *
239 * if (handle == NULL || buf == NULL || buf_len == NULL || *buf_len == 0)
240 * return M_IO_ERROR_INVALID;
241 *
242 * // Don't allow buffering data if we have data waiting to write.
243 * if (M_buf_len(handle->write_buf) > 0)
244 * return M_IO_ERROR_WOULDBLOCK;
245 *
246 * M_buf_add_bytes(handle->write_buf, buf, *buf_len);
247 *
248 * // Try to write as much of the message as we can right now.
249 * err = write_to_next_layer(layer, handle->write_buf);
250 *
251 * // Treat would block as success because we've buffered the data and it will
252 * // be written when possible.
253 * if (err == M_IO_ERROR_WOULDBLOCK)
254 * err = M_IO_ERROR_SUCCESS;
255 * return err;
256 * }
257 *
258 * // Dummy callback - only here because the docs for M_io_callbacks_t requires it to be present.
259 * static void unregister_cb(M_io_layer_t *layer)
260 * {
261 * (void)layer;
262 * // No-op
263 * }
264 *
265 * // Dummy callback - only here because the docs for M_io_callbacks_t requires it to be present.
266 * static M_bool reset_cb(M_io_layer_t *layer)
267 * {
268 * (void)layer;
269 * // No-op
270 * return M_TRUE;
271 * }
272 *
273 * static void destroy_cb(M_io_layer_t *layer)
274 * {
275 * M_io_handle_t *handle = M_io_layer_get_handle(layer);
276 *
277 * M_buf_cancel(handle->read_buf);
278 * M_buf_cancel(handle->write_buf);
279 *
280 * M_free(handle);
281 * }
282 *
283 * static M_bool error_cb(M_io_layer_t *layer, char *error, size_t err_len)
284 * {
285 * M_io_handle_t *handle = M_io_layer_get_handle(layer);
286 *
287 * if (M_str_isempty(handle->err))
288 * return M_FALSE;
289 *
290 * M_str_cpy(error, err_len, handle->err);
291 *
292 * return M_TRUE;
293 * }
294 *
295 * // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
296 * // --- MAIN ENTRY ---
297 *
298 * void basic_example_layer(M_io_t *io)
299 * {
300 * M_io_handle_t *handle;
301 * M_io_callbacks_t *callbacks;
302 *
303 * if (io == NULL)
304 * return;
305 *
306 * handle = M_malloc_zero(sizeof(*handle));
307 * handle->read_buf = M_buf_create();
308 * handle->write_buf = M_buf_create();
309 *
310 * callbacks = M_io_callbacks_create();
311 * M_io_callbacks_reg_processevent(callbacks, processevent_cb);
312 * M_io_callbacks_reg_read(callbacks, read_cb);
313 * M_io_callbacks_reg_write(callbacks, write_cb);
314 * M_io_callbacks_reg_unregister(callbacks, unregister_cb);
315 * M_io_callbacks_reg_reset(callbacks, reset_cb);
316 * M_io_callbacks_reg_destroy(callbacks, destroy_cb);
317 * M_io_callbacks_reg_errormsg(callbacks, error_cb);
318 *
319 * M_io_layer_add(io, "BASIC_LAYER", handle, callbacks);
320 *
321 * M_io_callbacks_destroy(callbacks);
322 * }
323 * \endcode
324 *
325 * ### STX + ETX + LRC with ACK / NAK and resending
326 *
327 * \code{.c}
328 * // IO Layer for generating and unwrapping STX+ETX+LRC with ACK/NAK messaging.
329 * // Supports resending message if no response is received within a specified time.
330 * // Will only resend X times before giving up and erring.
331 *
332 * #include <mstdlib/mstdlib.h>
333 * #include <mstdlib/io/m_io_layer.h>
334 *
335 * // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
336 *
337 * struct M_io_handle {
338 * // Read.
339 * M_parser_t *readparser; // Buffer containing message that's currently being stitched together from packets.
340 * M_buf_t *readbuf; // Buffer containing message that's currently being consumed by the caller.
341 *
342 * // Write.
343 * M_buf_t *last_msg; // Last buffered message, used for resend if ACK not received.
344 * M_buf_t *writebuf; // Buffer containing message that's currently being consumed by the caller.
345 * M_bool can_write; // Whether we can write more data. We only allow 1 outstanding message at a time.
346 *
347 * size_t resend_cnt; // Number of times we've tried resending the message.
348 * M_event_timer_t *resend_timer; // Timer to track when a response hasn't been received and the message should be resent.
349 *
350 * // Error
351 * char err[256]; // Error message buffer.
352 * }; // Typedef'd to M_io_handle_t in m_io_layer.h
353 *
354 * // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
355 *
356 * static const char *LAYER_NAME = "STXETXLRCACKNAK";
357 * static const size_t RESEND_MAX = 5;
358 * static const size_t RESEND_INTERVAL = 3*1000; // 3 seconds
359 * static const char *STX_STR = "\x02";
360 * static const char *ACK_STR = "\x06";
361 * static const char *NAK_STR = "\x15";
362 * static const unsigned char STX = 0x02;
363 * static const unsigned char ETX = 0x03;
364 * static const unsigned char ACK = 0x06;
365 * static const unsigned char NAK = 0x15;
366 *
367 * // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
368 *
369 * static M_io_error_t write_int(M_io_layer_t *layer, M_io_handle_t *handle, M_io_meta_t *meta)
370 * {
371 * M_io_t *io;
372 * size_t layer_idx;
373 * M_io_error_t err = M_IO_ERROR_SUCCESS;
374 * size_t write_len = 0;
375 *
376 * if (layer == NULL || handle == NULL)
377 * return M_IO_ERROR_INVALID;
378 *
379 * if (M_buf_len(handle->writebuf) == 0)
380 * return M_IO_ERROR_SUCCESS;
381 *
382 * io = M_io_layer_get_io(layer);
383 * layer_idx = M_io_layer_get_index(layer);
384 *
385 * if (io == NULL || layer_idx == 0)
386 * return M_IO_ERROR_INVALID;
387 *
388 * while (err == M_IO_ERROR_SUCCESS && M_buf_len(handle->writebuf) > 0) {
389 * write_len = M_buf_len(handle->writebuf);
390 * err = M_io_layer_write(io, layer_idx-1, (const M_uint8 *)M_buf_peek(handle->writebuf), &write_len, meta);
391 * if (err != M_IO_ERROR_SUCCESS) {
392 * write_len = 0;
393 * }
394 * M_buf_drop(handle->writebuf, write_len);
395 * }
396 *
397 * if (err == M_IO_ERROR_SUCCESS && M_buf_len(handle->writebuf) == 0 && handle->can_write)
398 * M_io_layer_softevent_add(layer, M_TRUE, M_EVENT_TYPE_WRITE, M_IO_ERROR_SUCCESS);
399 *
400 * if (M_buf_len(handle->writebuf) == 0)
401 * M_event_timer_start(handle->resend_timer, RESEND_INTERVAL);
402 *
403 * return err;
404 * }
405 *
406 * // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
407 *
408 * static void resend_message(M_io_layer_t *layer, M_io_handle_t *handle)
409 * {
410 * M_event_timer_stop(handle->resend_timer);
411 *
412 * if (handle->resend_cnt >= RESEND_MAX) {
413 *         M_snprintf(handle->err, sizeof(handle->err), "Timeout: Exceeded resend attempts");
414 * M_io_layer_softevent_add(layer, M_TRUE, M_EVENT_TYPE_ERROR, M_IO_ERROR_TIMEDOUT);
415 * return;
416 * }
417 *
418 * M_printf("%s: RESEND!", LAYER_NAME);
419 *
420 * // Write the last message again.
421 * M_buf_truncate(handle->writebuf, 0);
422 * M_buf_add_bytes(handle->writebuf, M_buf_peek(handle->last_msg), M_buf_len(handle->last_msg));
423 * write_int(layer, handle, NULL);
424 *
425 * handle->resend_cnt++;
426 * // Restart the resend timer and try again.
427 * M_event_timer_start(handle->resend_timer, RESEND_INTERVAL);
428 * }
429 *
430 * static void resend_cb(M_event_t *el, M_event_type_t etype, M_io_t *io, void *thunk)
431 * {
432 * M_io_layer_t *layer = thunk;
433 * M_io_handle_t *handle = M_io_layer_get_handle(layer);
434 *
435 * (void)el;
436 * (void)io;
437 * (void)etype;
438 *
439 * M_printf("%s: NO RESPONSE!", LAYER_NAME);
440 * resend_message(layer, handle);
441 * }
442 *
443 * // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
444 *
445 * static M_io_handle_t *create_handle(void)
446 * {
447 * M_io_handle_t *handle;
448 *
449 * handle = M_malloc_zero(sizeof(*handle));
450 * handle->readparser = M_parser_create(M_PARSER_FLAG_NONE);
451 * handle->readbuf = M_buf_create();
452 * handle->writebuf = M_buf_create();
453 * handle->last_msg = M_buf_create();
454 * handle->can_write = M_TRUE;
455 *
456 * return handle;
457 * }
458 *
459 * static void destroy_handle(M_io_handle_t *handle)
460 * {
461 * if (handle == NULL)
462 * return;
463 *
464 * M_parser_destroy(handle->readparser);
465 * M_buf_cancel(handle->readbuf);
466 * M_buf_cancel(handle->writebuf);
467 * M_buf_cancel(handle->last_msg);
468 * M_event_timer_remove(handle->resend_timer);
469 *
470 * M_free(handle);
471 * }
472 *
473 * // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
474 *
475 * static M_bool process_message(M_io_layer_t *layer, M_io_handle_t *handle)
476 * {
477 * M_parser_t *msg_parser = NULL;
478 * M_PARSER_FRAME_ERROR frame_error;
479 * size_t len;
480 *
481 * if (M_parser_len(handle->readparser) == 0)
482 * return M_TRUE;
483 *
484 * // NAK means we need to send the last message again.
485 * len = M_parser_consume_charset(handle->readparser, (const unsigned char *)NAK_STR, 1);
486 * if (len != 0) {
487 * M_printf("%s: NAK!", LAYER_NAME);
488 * resend_message(layer, handle);
489 * return M_TRUE;
490 * }
491 *
492 * // Got something that's not a NAK.
493 * // Even if it's not an ACK we'll consider
494 * // the last message we sent processed.
495 * handle->resend_cnt = 0;
496 * M_event_timer_stop(handle->resend_timer);
497 * M_buf_truncate(handle->last_msg, 0);
498 *
499 * // ACK tells us we can write more data.
500 * len = M_parser_consume_charset(handle->readparser, (const unsigned char *)ACK_STR, 1);
501 *     if (len > 0)
502 * handle->can_write = M_TRUE;
503 *
504 * // We could have garbage that will be ignored.
505 * len = M_parser_consume_not_charset(handle->readparser, (const unsigned char *)STX_STR, 1);
506 * if (len > 0)
507 * M_printf("%s: Unexpected data was dropped (%zu bytes)", LAYER_NAME, len);
508 *
509 * // We either have an STX at the start or no data. We also need at least 4 bytes of data to have a framed message.
510 * // We could have only received an ACK earlier and not need to process any data.
511 * if (M_parser_len(handle->readparser) < 4)
512 * return M_TRUE;
513 *
514 * // Pull the data out of the wrapper.
515 * frame_error = M_parser_read_stxetxlrc_message(handle->readparser, &msg_parser, M_PARSER_FRAME_ETX);
516 * if (frame_error != M_PARSER_FRAME_ERROR_SUCCESS) {
517 *         // M_PARSER_FRAME_ERROR_INVALID, M_PARSER_FRAME_ERROR_NO_ETX, and M_PARSER_FRAME_ERROR_NO_LRC indicate more data is needed
518 * // will result in waiting for more data to come in. M_PARSER_FRAME_ERROR_NO_STX shouldn't happen because
519 *         // we've already validated the first character is an STX. M_PARSER_FRAME_ERROR_LRC_CALC_FAILED is a real failure.
520 * if (frame_error == M_PARSER_FRAME_ERROR_LRC_CALC_FAILED) {
521 * M_printf("%s: Message LRC verification failed: dropped (%zu bytes)", LAYER_NAME, M_parser_len(msg_parser));
522 *
523 * M_buf_add_byte(handle->writebuf, NAK);
524 * write_int(layer, handle, NULL);
525 * }
526 * M_parser_destroy(msg_parser);
527 * return M_TRUE;
528 * }
529 *
530 * // Store the data.
531 * M_buf_add_bytes(handle->readbuf, M_parser_peek(msg_parser), M_parser_len(msg_parser));
532 *
533 * // Write an ACK.
534 * M_buf_add_byte(handle->writebuf, ACK);
535 * write_int(layer, handle, NULL);
536 *
537 * M_parser_destroy(msg_parser);
538 * return M_FALSE;
539 * }
540 *
541 * static M_bool handle_read(M_io_layer_t *layer, M_io_handle_t *handle, M_event_type_t *type)
542 * {
543 * M_io_t *io = M_io_layer_get_io(layer);
544 * M_bool discard = M_TRUE; // Default to processing and then discarding all events
545 * M_io_error_t err;
546 * unsigned char buf[8192] = { 0 };
547 * size_t len;
548 *
549 * do {
550 * len = sizeof(buf);
551 * err = M_io_layer_read(io, M_io_layer_get_index(layer)-1, buf, &len, NULL);
552 *
553 * if (err == M_IO_ERROR_SUCCESS) {
554 * M_parser_append(handle->readparser, buf, len);
555 * discard = process_message(layer, handle);
556 * } else if (M_io_error_is_critical(err)) {
557 * M_snprintf(handle->err, sizeof(handle->err), "Error reading message: %s", M_io_error_string(err));
558 * *type = M_EVENT_TYPE_ERROR;
559 * }
560 * } while (err == M_IO_ERROR_SUCCESS);
561 *
562 * return discard;
563 * }
564 *
565 * static M_bool handle_write(M_io_layer_t *layer, M_io_handle_t *handle, M_event_type_t *type)
566 * {
567 * M_io_error_t err;
568 *
569 * err = write_int(layer, handle, NULL);
570 * if (M_io_error_is_critical(err)) {
571 * M_snprintf(handle->err, sizeof(handle->err), "Error writing message: %s", M_io_error_string(err));
572 * *type = M_EVENT_TYPE_ERROR;
573 * return M_TRUE;
574 * }
575 *
576 * return M_FALSE;
577 * }
578 *
579 * static M_io_error_t read_cb(M_io_layer_t *layer, unsigned char *buf, size_t *read_len, M_io_meta_t *meta)
580 * {
581 * M_io_handle_t *handle;
582 * size_t bytes_left;
583 *
584 * (void)meta;
585 *
586 * handle = M_io_layer_get_handle(layer);
587 *
588 * // Zero-length reads are not allowed.
589 * if (handle == NULL || buf == NULL || read_len == NULL || *read_len == 0)
590 * return M_IO_ERROR_INVALID;
591 *
592 * // Process any outstanding messages and fill our read out buffer.
593 * process_message(layer, handle);
594 *
595 * // Check if we have a full message.
596 * if (M_buf_len(handle->readbuf) == 0)
597 * return M_IO_ERROR_WOULDBLOCK;
598 *
599 * // Read everything we can.
600 * bytes_left = M_buf_len(handle->readbuf);
601 * *read_len = M_MIN(*read_len, bytes_left);
602 *
603 * M_mem_copy(buf, M_buf_peek(handle->readbuf), *read_len);
604 * M_buf_drop(handle->readbuf, *read_len);
605 *
606 * // If we still have data available to read (needs to be processed)
607 * // send another read event.
608 * if (M_parser_len(handle->readparser) > 0)
609 * M_io_layer_softevent_add(layer, M_TRUE, M_EVENT_TYPE_READ, M_IO_ERROR_SUCCESS);
610 *
611 * return M_IO_ERROR_SUCCESS;
612 * }
613 *
614 * static M_io_error_t write_cb(M_io_layer_t *layer, const unsigned char *buf, size_t *buf_len, M_io_meta_t *meta)
615 * {
616 * M_io_handle_t *handle = M_io_layer_get_handle(layer);
617 * M_io_t *io = M_io_layer_get_io(layer);
618 * size_t mywrite_len = 0;
619 *
620 * if (handle == NULL || buf == NULL || buf_len == NULL || *buf_len == 0)
621 * return M_IO_ERROR_INVALID;
622 *
623 * if (M_buf_len(handle->writebuf) > 0)
624 * return M_IO_ERROR_WOULDBLOCK;
625 *
626 * // Wrap the message in "<stx> + <data> + <etx> + <lrc>"
627 * M_buf_add_byte(handle->writebuf, STX);
628 * M_buf_add_bytes(handle->writebuf, buf, *buf_len);
629 * M_buf_add_byte(handle->writebuf, ETX);
630 * M_buf_add_byte(handle->writebuf, M_mem_calc_lrc(M_buf_peek(handle->writebuf)+1, M_buf_len(handle->writebuf)-1));
631 *
632 * // Store this message in case we need to resend because writebuf will be truncated as data gets sent.
633 * M_buf_truncate(handle->last_msg, 0);
634 * M_buf_add_bytes(handle->last_msg, M_buf_peek(handle->writebuf), M_buf_len(handle->writebuf));
635 *
636 * handle->can_write = M_FALSE;
637 * return write_int(layer, handle, meta);
638 * }
639 *
640 * static M_bool processevent_cb(M_io_layer_t *layer, M_event_type_t *type)
641 * {
642 * M_io_handle_t *handle = M_io_layer_get_handle(layer);
643 * M_io_t *io = M_io_layer_get_io(layer);
644 * const char *estr;
645 *
646 * switch (*type) {
647 * case M_EVENT_TYPE_CONNECTED:
648 * handle->can_write = M_TRUE;
649 * // Fall thru
650 * case M_EVENT_TYPE_WRITE:
651 * return handle_write(layer, handle, type);
652 * case M_EVENT_TYPE_READ:
653 * return handle_read(layer, handle, type);
654 * case M_EVENT_TYPE_DISCONNECTED:
655 * case M_EVENT_TYPE_ERROR:
656 * case M_EVENT_TYPE_ACCEPT:
657 * case M_EVENT_TYPE_OTHER:
658 * return M_FALSE;
659 * }
660 *
661 * return M_FALSE;
662 * }
663 *
664 * static M_bool error_cb(M_io_layer_t *layer, char *error, size_t err_len)
665 * {
666 * M_io_handle_t *handle = M_io_layer_get_handle(layer);
667 *
668 * if (M_str_isempty(handle->err))
669 * return M_FALSE;
670 *
671 * M_str_cpy(error, err_len, handle->err);
672 * return M_TRUE;
673 * }
674 *
675 * // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
676 *
677 * static M_bool init_cb(M_io_layer_t *layer)
678 * {
679 * M_io_handle_t *handle = M_io_layer_get_handle(layer);
680 * M_io_t *io = M_io_layer_get_io(layer);
681 *
682 * if (handle->resend_timer != NULL)
683 * M_event_timer_remove(handle->resend_timer);
684 * handle->resend_timer = M_event_timer_add(M_io_get_event(io), resend_cb, layer);
685 * return M_TRUE;
686 * }
687 *
688 * // Dummy callback - only here because the docs for M_io_callbacks_t requires it to be present.
689 * static void unregister_cb(M_io_layer_t *layer)
690 * {
691 * (void)layer;
692 * // No-op
693 * }
694 *
695 * // Dummy callback - only here because the docs for M_io_callbacks_t requires it to be present.
696 * static M_bool reset_cb(M_io_layer_t *layer)
697 * {
698 * (void)layer;
699 * // No-op
700 * return M_TRUE;
701 * }
702 *
703 * static void destroy_cb(M_io_layer_t *layer)
704 * {
705 * if (layer == NULL)
706 * return;
707 * destroy_handle(M_io_layer_get_handle(layer));
708 * }
709 *
710 * // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
711 * // --- MAIN ENTRY ---
712 *
713 * void stxetxlrcacknak_io_layer(M_io_t *io, M_bool reconnect)
714 * {
715 * M_io_handle_t *handle = create_handle();
716 * M_io_callbacks_t *callbacks = M_io_callbacks_create();
717 *
718 * M_io_callbacks_reg_init(callbacks, init_cb);
719 * M_io_callbacks_reg_read(callbacks, read_cb);
720 * M_io_callbacks_reg_write(callbacks, write_cb);
721 * M_io_callbacks_reg_processevent(callbacks, processevent_cb);
722 * M_io_callbacks_reg_unregister(callbacks, unregister_cb);
723 * M_io_callbacks_reg_reset(callbacks, reset_cb);
724 * M_io_callbacks_reg_destroy(callbacks, destroy_cb);
725 * M_io_callbacks_reg_errormsg(callbacks, error_cb);
726 *
727 * M_io_layer_add(io, LAYER_NAME, handle, callbacks);
728 *
729 * M_io_callbacks_destroy(callbacks);
730 * }
731 *
732 * M_bool stxetxlrcacknak_layer_waiting_for_response(M_io_t *io)
733 * {
734 * size_t layer_count;
735 * size_t layer_idx;
736 * M_bool ret = M_FALSE;
737 *
738 * layer_count = M_io_layer_count(io);
739 *
740 * for (layer_idx=0; layer_idx<layer_count; layer_idx++) {
741 * M_io_layer_t *layer = M_io_layer_acquire(io, layer_idx, LAYER_NAME);
742 * if (layer == NULL) {
743 * continue;
744 * }
745 * M_io_handle_t *handle = M_io_layer_get_handle(layer);
746 * ret = !handle->can_write;
747 * M_io_layer_release(layer);
748 * break;
749 * }
750 *
751 * return ret;
752 * }
753 * \endcode
754 *
755 * ### BLE Helper
756 *
757 * \code{.c}
758 * #include <mstdlib/io/m_io_layer.h>
759 *
760 * // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
761 *
762 * static const char *LAYER_NAME = "BLE_SERVICE_HELPER";
763 * static const char *BLE_SERVICE = "68950001-FBA1-BB3D-A043-647EF78ACD44";
764 * static const char *BLE_CHARACTERISTIC = "68951001-FBA1-BB3D-A043-647EF78ACD44";
765 *
766 * // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
767 *
768 * struct M_io_handle {
769 * M_io_meta_t *write_meta;
770 * M_buf_t *read_buf;
771 * M_buf_t *write_buf;
772 * char err[256];
773 * M_bool connected;
774 * };
775 *
776 * // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
777 *
778 * // Writes as much data as possible from the given buffer to the layer below this one.
779 * static M_io_error_t write_to_next_layer(M_io_layer_t *layer, M_buf_t *buf, M_io_meta_t *meta)
780 * {
781 * M_io_t *io;
782 * M_io_error_t err;
783 * size_t layer_idx;
784 * size_t write_len;
785 *
786 * if (layer == NULL || M_buf_len(buf) == 0)
787 * return M_IO_ERROR_INVALID;
788 *
789 * io = M_io_layer_get_io(layer);
790 * layer_idx = M_io_layer_get_index(layer);
791 *
792 * if (io == NULL || layer_idx == 0)
793 * return M_IO_ERROR_INVALID;
794 *
795 * err = M_IO_ERROR_SUCCESS;
796 * do {
797 * write_len = M_buf_len(buf);
798 * err = M_io_layer_write(io, layer_idx - 1, (const M_uint8 *)M_buf_peek(buf), &write_len, meta);
799 * if (err == M_IO_ERROR_SUCCESS) {
800 * M_buf_drop(buf, write_len);
801 * }
802 * } while (err == M_IO_ERROR_SUCCESS && M_buf_len(buf) > 0);
803 *
804 * return err;
805 * }
806 *
807 * // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
808 *
809 * static M_io_error_t write_pair_check_data(M_io_layer_t *layer, M_buf_t *write_buf, M_io_meta_t *write_meta)
810 * {
811 * // Simple command used as a dummy write to initiate pairing if needed. We just want to send something.
812 * // But this should be a real, simple, command for the device.
813 * M_buf_add_str(write_buf, "123\r\n");
814 * return write_to_next_layer(layer, write_buf, write_meta);
815 * }
816 *
817 * static M_io_error_t register_notifications(M_io_t *io, M_io_layer_t *layer, const char *service_uuid, const char *char_uuid)
818 * {
819 * M_io_meta_t *meta;
820 * M_io_error_t err;
821 *
822 * meta = M_io_meta_create();
823 * M_io_ble_meta_set_service(io, meta, service_uuid);
824 * M_io_ble_meta_set_characteristic(io, meta, char_uuid);
825 *
826 * M_io_ble_meta_set_notify(io, meta, M_TRUE);
827 * M_io_ble_meta_set_write_type(io, meta, M_IO_BLE_WTYPE_REQNOTIFY);
828 *
829 * // Write the metadata to the device to register we want to receive notification read events.
830 * err = M_io_layer_write(io, M_io_layer_get_index(layer) - 1, NULL, NULL, meta);
831 * M_io_meta_destroy(meta);
832 *
833 * return err;
834 * }
835 *
836 * // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
837 *
838 * static M_bool processevent_cb(M_io_layer_t *layer, M_event_type_t *type)
839 * {
840 * M_io_handle_t *handle = M_io_layer_get_handle(layer);
841 * M_io_t *io = M_io_layer_get_io(layer);
842 * M_io_meta_t *meta;
843 * M_bool consume = M_FALSE; // Default to passing onto the next layer.
844 * M_io_error_t err;
845 *
846 * if (handle == NULL || io == NULL)
847 * return M_FALSE;
848 *
849 * switch (*type) {
850 * case M_EVENT_TYPE_CONNECTED:
851 * // If the device uses secure bonded/paired connections and a device is not paired, pairing takes place
852 * // when a secure characteristic is first written. At which point the device will show a PIN
853 * // and system (iOS, macOS...) will show a prompt to enter the PIN. We'll delay sending the
854 * // connected event until we get a response to a basic message that we use internally
855 * // to verify or initiate the pairing.
856 * //
857 * // We need to do this before trying to register for notifications. If
858 * // we're already paired then there is no problem. However, if we aren't
859 * // paired and need to go through pairing, attempting to register a notification
860 * // before writing a message will cause the device to disconnect.
861 * err = write_pair_check_data(layer, handle->write_buf, handle->write_meta);
862 * if (err != M_IO_ERROR_SUCCESS) {
863 * M_snprintf(handle->err, sizeof(handle->err), "Error writing initial pairing message: %s", M_io_error_string(err));
864 * *type = M_EVENT_TYPE_ERROR;
865 * break;
866 * }
867 * consume = M_TRUE;
868 * break;
869 * case M_EVENT_TYPE_WRITE:
 *             // A write event received while we're not connected means this is confirmation that the
 *             // data we wrote was processed. If we're paired we'll get this right away. If we're not
 *             // paired we'll get this when pairing is successful. Otherwise, we'd get an error event.
873 * if (!handle->connected) {
874 * // We're paired so we can register for our read events.
875 * consume = M_TRUE;
876 * register_notifications(io, layer, BLE_SERVICE, BLE_CHARACTERISTIC);
877 * break;
878 * }
879 *
880 * if (M_buf_len(handle->write_buf) != 0) {
881 * err = write_to_next_layer(layer, handle->write_buf, handle->write_meta);
882 * if (M_io_error_is_critical(err)) {
883 * M_snprintf(handle->err, sizeof(handle->err), "Error writing data: %s", M_io_error_string(err));
884 * *type = M_EVENT_TYPE_ERROR;
885 * } else if (M_buf_len(handle->write_buf) != 0) {
886 * // Don't inform higher levels we can write if we have more data pending.
887 * consume = M_TRUE;
888 * }
889 * }
890 * break;
891 * case M_EVENT_TYPE_READ:
892 * // We're getting data from the device. Let's pull out the data and check if we want it or not.
893 * meta = M_io_meta_create();
894 * do {
895 * M_uint8 buf[256] = { 0 };
896 * size_t buf_len = sizeof(buf);
897 *
898 * err = M_io_layer_read(io, M_io_layer_get_index(layer) - 1, buf, &buf_len, meta);
899 * // We should get one notification response to the single notification we requested.
900 * // If we aren't connected and we get it, then we want to tell those above us we're
901 * // connected now that we've finished setup. Otherwise, if we get an unexpected notification
902 * // response, eat it because there is no data for anyone to read.
903 * if (M_io_ble_meta_get_read_type(io, meta) == M_IO_BLE_RTYPE_NOTIFY) {
904 * if (handle->connected) {
905 * consume = M_TRUE;
906 * } else {
907 * handle->connected = M_TRUE;
908 * *type = M_EVENT_TYPE_CONNECTED;
909 * break;
910 * }
911 * }
912 *
913 * if (err == M_IO_ERROR_SUCCESS) {
914 * // Save the data into the handler's buffer so we
915 * // can pass it up to the layer above in the read callback.
916 * M_buf_add_bytes(handle->read_buf, buf, buf_len);
917 * } else if (M_io_error_is_critical(err)) {
918 * M_snprintf(handle->err, sizeof(handle->err), "Error reading data: %s", M_io_error_string(err));
919 * *type = M_EVENT_TYPE_ERROR;
920 * break;
921 * }
922 * } while (err == M_IO_ERROR_SUCCESS);
923 * M_io_meta_destroy(meta);
924 * break;
925 * case M_EVENT_TYPE_ERROR:
926 * M_io_get_error_string(io, handle->err, sizeof(handle->err));
927 * break;
928 * case M_EVENT_TYPE_ACCEPT:
929 * case M_EVENT_TYPE_DISCONNECTED:
930 * case M_EVENT_TYPE_OTHER:
931 * break;
932 * }
933 *
934 * // M_TRUE to discard this event, or M_FALSE to pass it on to the next layer.
935 * return consume;
936 * }
937 *
938 * static M_io_error_t read_cb(M_io_layer_t *layer, unsigned char *buf, size_t *buf_len, M_io_meta_t *meta)
939 * {
940 * M_io_handle_t *handle = M_io_layer_get_handle(layer);
941 *
942 * (void)meta;
943 *
944 * if (handle == NULL || buf == NULL || buf_len == NULL || *buf_len == 0)
945 * return M_IO_ERROR_INVALID;
946 *
947 * // Check if we have any more data to pass on or not.
948 * if (M_buf_len(handle->read_buf) == 0)
949 * return M_IO_ERROR_WOULDBLOCK;
950 *
951 * // Pass on as much data as possible.
952 * *buf_len = M_MIN(*buf_len, M_buf_len(handle->read_buf));
953 * M_mem_copy(buf, M_buf_peek(handle->read_buf), *buf_len);
954 * M_buf_drop(handle->read_buf, *buf_len);
955 *
956 * return M_IO_ERROR_SUCCESS;
957 * }
958 *
959 * static M_io_error_t write_cb(M_io_layer_t *layer, const unsigned char *buf, size_t *buf_len, M_io_meta_t *meta)
960 * {
961 * M_io_handle_t *handle;
962 * M_io_error_t err;
963 *
964 * (void)meta;
965 *
966 * handle = M_io_layer_get_handle(layer);
967 *
968 * if (handle == NULL || buf == NULL || buf_len == NULL || *buf_len == 0)
969 * return M_IO_ERROR_INVALID;
970 *
971 * if (!handle->connected)
972 * return M_IO_ERROR_WOULDBLOCK;
973 *
974 * // Don't allow buffering data if we have data waiting to write.
975 * if (M_buf_len(handle->write_buf) > 0)
976 * return M_IO_ERROR_WOULDBLOCK;
977 *
978 * M_buf_add_bytes(handle->write_buf, buf, *buf_len);
979 *
980 * // Try to write as much of the message as we can right now.
981 * err = write_to_next_layer(layer, handle->write_buf, handle->write_meta);
982 *
983 * // Treat would block as success because we've buffered the data and it will
984 * // be written when possible.
985 * if (err == M_IO_ERROR_WOULDBLOCK)
986 * err = M_IO_ERROR_SUCCESS;
987 * return err;
988 * }
989 *
990 * // Dummy callback - only here because the docs for M_io_callbacks_t requires it to be present.
991 * static void unregister_cb(M_io_layer_t *layer)
992 * {
993 * (void)layer;
994 * // No-op
995 * }
996 *
997 * // Dummy callback - only here because the docs for M_io_callbacks_t requires it to be present.
998 * static M_bool reset_cb(M_io_layer_t *layer)
999 * {
1000 * (void)layer;
1001 * // No-op
1002 * return M_TRUE;
1003 * }
1004 *
1005 * static void destroy_cb(M_io_layer_t *layer)
1006 * {
1007 * M_io_handle_t *handle = M_io_layer_get_handle(layer);
1008 *
1009 * M_io_meta_destroy(handle->write_meta);
1010 * M_buf_cancel(handle->read_buf);
1011 * M_buf_cancel(handle->write_buf);
1012 *
1013 * M_free(handle);
1014 * }
1015 *
1016 * static M_bool error_cb(M_io_layer_t *layer, char *error, size_t err_len)
1017 * {
1018 * M_io_handle_t *handle = M_io_layer_get_handle(layer);
1019 *
1020 * if (M_str_isempty(handle->err))
1021 * return M_FALSE;
1022 *
1023 * M_str_cpy(error, err_len, handle->err);
1024 *
1025 * return M_TRUE;
1026 * }
1027 *
1028 * // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1029 * // --- MAIN ENTRY ---
1030 * //
1031 * void ble_helper_io_layer(M_io_t *io)
1032 * {
1033 * M_io_handle_t *handle;
1034 * M_io_callbacks_t *callbacks;
1035 *
1036 * if (io == NULL)
1037 * return;
1038 *
1039 * handle = M_malloc_zero(sizeof(*handle));
1040 * handle->write_meta = M_io_meta_create();
1041 * handle->read_buf = M_buf_create();
1042 * handle->write_buf = M_buf_create();
1043 *
1044 * M_io_ble_meta_set_service(io, handle->write_meta, BLE_SERVICE);
1045 * M_io_ble_meta_set_characteristic(io, handle->write_meta, BLE_CHARACTERISTIC);
1046 * M_io_ble_meta_set_write_type(io, handle->write_meta, M_IO_BLE_WTYPE_WRITE);
1047 *
1048 * callbacks = M_io_callbacks_create();
1049 * M_io_callbacks_reg_processevent(callbacks, processevent_cb);
1050 * M_io_callbacks_reg_read(callbacks, read_cb);
1051 * M_io_callbacks_reg_write(callbacks, write_cb);
1052 * M_io_callbacks_reg_unregister(callbacks, unregister_cb);
1053 * M_io_callbacks_reg_reset(callbacks, reset_cb);
1054 * M_io_callbacks_reg_destroy(callbacks, destroy_cb);
1055 * M_io_callbacks_reg_errormsg(callbacks, error_cb);
1056 *
1057 * M_io_layer_add(io, LAYER_NAME, handle, callbacks);
1058 *
1059 * M_io_callbacks_destroy(callbacks);
1060 * }
1061 * \endcode
1062 *
1063 * @{
1064 */
1065
1066
#ifdef _WIN32
/* Needed for types like SOCKET */
# include <winsock2.h>
# include <ws2tcpip.h>
# include <mstcpip.h>
# include <windows.h>
/* On Windows, event sockets/handles map directly onto the native Win32 types. */
# define M_EVENT_SOCKET SOCKET
# define M_EVENT_INVALID_SOCKET INVALID_SOCKET
# define M_EVENT_HANDLE HANDLE
# define M_EVENT_INVALID_HANDLE NULL
#else
/* On POSIX systems both event handles and sockets are plain file descriptors,
 * with -1 as the conventional invalid value. */
# define M_EVENT_HANDLE int
# define M_EVENT_INVALID_HANDLE -1
# define M_EVENT_SOCKET int
# define M_EVENT_INVALID_SOCKET -1
#endif
1083
1084
/*! Opaque type representing a single layer within an io object's layer stack. */
struct M_io_layer;
typedef struct M_io_layer M_io_layer_t;

/*! Opaque, implementation-specific state for a layer; defined by the layer author. */
struct M_io_handle;
typedef struct M_io_handle M_io_handle_t;

/*! Opaque set of layer callbacks; populated via the M_io_callbacks_reg_*()
 *  functions and passed to M_io_layer_add(). */
struct M_io_callbacks;
typedef struct M_io_callbacks M_io_callbacks_t;
1093
1094
1095
1096/*! Find the appropriate layer and grab the handle and lock it.
1097 *
1098 * \warning Locking the layer locks the entire event loop. Only very
1099 * short operations that will not block should be performed
1100 * while a layer lock is being held.
1101 *
1102 * \param[in] io Pointer to io object
1103 * \param[in] layer_id id of layer to lock, or M_IO_LAYER_FIND_FIRST_ID to search for layer.
1104 * \param[in] name Name of layer to lock. This can be used as a sanity check to ensure
1105 * the layer id really matches the layer type. Use NULL if name matching
1106 * is not required. If M_IO_LAYER_FIND_FIRST_ID is used for the layer_id,
1107 * this parameter cannot be NULL.
1108 *
1109 * \return locked io layer, or NULL on failure
1110 *
1111 * \see M_io_layer_release
1112 */
1113M_API M_io_layer_t *M_io_layer_acquire(M_io_t *io, size_t layer_id, const char *name);
1114
1115/*! Release the lock on the layer */
1117
1118/*! Initialize a new io object of given type */
1120
1121/*! Get the type of the io object */
1123
1124/*! Create M_io_callbacks_t object that can be passed to M_io_layer_add */
1126
/*! Register callback to initialize/begin. Is called when the io object is attached
 * to an event. Mandatory. */
M_API M_bool M_io_callbacks_reg_init(M_io_callbacks_t *callbacks, M_bool (*cb_init)(M_io_layer_t *layer));

/*! Register callback to accept a new connection. Conditional. */
M_API M_bool M_io_callbacks_reg_accept(M_io_callbacks_t *callbacks, M_io_error_t (*cb_accept)(M_io_t *new_conn, M_io_layer_t *orig_layer));

/*! Register callback to read from the connection. Optional if not base layer, required if base layer */
M_API M_bool M_io_callbacks_reg_read(M_io_callbacks_t *callbacks, M_io_error_t (*cb_read)(M_io_layer_t *layer, unsigned char *buf, size_t *read_len, M_io_meta_t *meta));

/*! Register callback to write to the connection. Optional if not base layer, required if base layer */
M_API M_bool M_io_callbacks_reg_write(M_io_callbacks_t *callbacks, M_io_error_t (*cb_write)(M_io_layer_t *layer, const unsigned char *buf, size_t *write_len, M_io_meta_t *meta));

/*! Register callback to process events. Optional. If returns M_TRUE event is consumed and not propagated to the next layer. */
M_API M_bool M_io_callbacks_reg_processevent(M_io_callbacks_t *callbacks, M_bool (*cb_process_event)(M_io_layer_t *layer, M_event_type_t *type));

/*! Register callback that is called when io object is removed from event object. Mandatory */
M_API M_bool M_io_callbacks_reg_unregister(M_io_callbacks_t *callbacks, void (*cb_unregister)(M_io_layer_t *layer));

/*! Register callback to start a graceful disconnect sequence. Optional. */
M_API M_bool M_io_callbacks_reg_disconnect(M_io_callbacks_t *callbacks, M_bool (*cb_disconnect)(M_io_layer_t *layer));

/*! Register callback to reset any state (M_io_handle_t *). Optional.
 *
 * Will reset the state of the layer for re-connection.
 */
M_API M_bool M_io_callbacks_reg_reset(M_io_callbacks_t *callbacks, M_bool (*cb_reset)(M_io_layer_t *layer));

/*! Register callback to destroy any state (M_io_handle_t *). Mandatory.
 *
 * The event loop has already been disassociated from the layer when this
 * callback is called. The layer will not be locked and M_io_layer_acquire
 * will not lock the layer as the layer cannot be locked.
 */
M_API M_bool M_io_callbacks_reg_destroy(M_io_callbacks_t *callbacks, void (*cb_destroy)(M_io_layer_t *layer));

/*! Register callback to get the layer state. Optional if not base layer, required if base layer. */
M_API M_bool M_io_callbacks_reg_state(M_io_callbacks_t *callbacks, M_io_state_t (*cb_state)(M_io_layer_t *layer));

/*! Register callback to get the error message, will be called if cb_state returns M_IO_STATE_ERROR. If registered, cb_state must also be registered */
M_API M_bool M_io_callbacks_reg_errormsg(M_io_callbacks_t *callbacks, M_bool (*cb_errormsg)(M_io_layer_t *layer, char *error, size_t err_len));
1168
1169/*! Destroy M_io_callbacks_t object */
1171
1172/*! Maximum number of layers for an I/O object. One reserved for the user layer */
1173#define M_IO_LAYERS_MAX 16
1174
1175/*! Add a layer to an io object */
1176M_API M_io_layer_t *M_io_layer_add(M_io_t *io, const char *layer_name, M_io_handle_t *handle, const M_io_callbacks_t *callbacks);
1177
1178/*! Given a layer object, retrieve the M_io_t reference */
1180
1181/*! Given a layer object, retrieve the name of the layer */
1182M_API const char *M_io_layer_get_name(M_io_layer_t *layer);
1183
1184/*! Given a layer object, retrieve the implementation-specific handle */
1186
1187/*! Given a layer object, retrieve the index of the layer in the parent M_io_t object */
1189
1190/*! Perform a read operation at the given layer index */
1191M_API M_io_error_t M_io_layer_read(M_io_t *io, size_t layer_id, unsigned char *buf, size_t *read_len, M_io_meta_t *meta);
1192
1193/*! Perform a write operation at the given layer index */
1194M_API M_io_error_t M_io_layer_write(M_io_t *io, size_t layer_id, const unsigned char *buf, size_t *write_len, M_io_meta_t *meta);
1195
1197
1198/*! Add a soft-event. If sibling_only is true, will only notify next layer and not self. Must specify an error. */
1199M_API void M_io_layer_softevent_add(M_io_layer_t *layer, M_bool sibling_only, M_event_type_t type, M_io_error_t err);
1200
1201/*! Clear all soft events for the current layer */
1203
1204/*! Add a soft-event. If sibling_only is true, will only delete the soft event for the next layer up and not self. */
1205M_API void M_io_layer_softevent_del(M_io_layer_t *layer, M_bool sibling_only, M_event_type_t type);
1206
1207/*! Sets the internal error for the IO object. Used within a process events callback if emitting an error */
1209
1210/*! @} */
1211
1212__END_DECLS
1213
1214
1215#endif
enum M_event_type M_event_type_t
Definition: m_event.h:189
M_bool M_io_callbacks_reg_errormsg(M_io_callbacks_t *callbacks, M_bool(*cb_errormsg)(M_io_layer_t *layer, char *error, size_t err_len))
M_bool M_io_callbacks_reg_processevent(M_io_callbacks_t *callbacks, M_bool(*cb_process_event)(M_io_layer_t *layer, M_event_type_t *type))
void M_io_layer_softevent_add(M_io_layer_t *layer, M_bool sibling_only, M_event_type_t type, M_io_error_t err)
M_bool M_io_callbacks_reg_read(M_io_callbacks_t *callbacks, M_io_error_t(*cb_read)(M_io_layer_t *layer, unsigned char *buf, size_t *read_len, M_io_meta_t *meta))
M_bool M_io_error_is_critical(M_io_error_t err)
M_io_error_t M_io_layer_write(M_io_t *io, size_t layer_id, const unsigned char *buf, size_t *write_len, M_io_meta_t *meta)
struct M_io_layer M_io_layer_t
Definition: m_io_layer.h:1086
M_io_t * M_io_init(M_io_type_t type)
M_io_t * M_io_layer_get_io(M_io_layer_t *layer)
void M_io_layer_softevent_del(M_io_layer_t *layer, M_bool sibling_only, M_event_type_t type)
void M_io_layer_release(M_io_layer_t *layer)
M_bool M_io_callbacks_reg_disconnect(M_io_callbacks_t *callbacks, M_bool(*cb_disconnect)(M_io_layer_t *layer))
struct M_io_handle M_io_handle_t
Definition: m_io_layer.h:1089
size_t M_io_layer_get_index(M_io_layer_t *layer)
M_bool M_io_callbacks_reg_state(M_io_callbacks_t *callbacks, M_io_state_t(*cb_state)(M_io_layer_t *layer))
M_io_error_t M_io_layer_read(M_io_t *io, size_t layer_id, unsigned char *buf, size_t *read_len, M_io_meta_t *meta)
void M_io_set_error(M_io_t *io, M_io_error_t err)
void M_io_callbacks_destroy(M_io_callbacks_t *callbacks)
const char * M_io_layer_get_name(M_io_layer_t *layer)
M_bool M_io_callbacks_reg_destroy(M_io_callbacks_t *callbacks, void(*cb_destroy)(M_io_layer_t *layer))
M_bool M_io_callbacks_reg_init(M_io_callbacks_t *callbacks, M_bool(*cb_init)(M_io_layer_t *layer))
void M_io_layer_softevent_clear(M_io_layer_t *layer)
M_bool M_io_callbacks_reg_reset(M_io_callbacks_t *callbacks, M_bool(*cb_reset)(M_io_layer_t *layer))
M_io_layer_t * M_io_layer_add(M_io_t *io, const char *layer_name, M_io_handle_t *handle, const M_io_callbacks_t *callbacks)
struct M_io_callbacks M_io_callbacks_t
Definition: m_io_layer.h:1092
M_io_callbacks_t * M_io_callbacks_create(void)
M_bool M_io_callbacks_reg_unregister(M_io_callbacks_t *callbacks, void(*cb_unregister)(M_io_layer_t *layer))
M_bool M_io_callbacks_reg_accept(M_io_callbacks_t *callbacks, M_io_error_t(*cb_accept)(M_io_t *new_conn, M_io_layer_t *orig_layer))
M_io_layer_t * M_io_layer_acquire(M_io_t *io, size_t layer_id, const char *name)
M_io_type_t M_io_get_type(M_io_t *io)
M_io_handle_t * M_io_layer_get_handle(M_io_layer_t *layer)
M_bool M_io_callbacks_reg_write(M_io_callbacks_t *callbacks, M_io_error_t(*cb_write)(M_io_layer_t *layer, const unsigned char *buf, size_t *write_len, M_io_meta_t *meta))
enum M_io_error M_io_error_t
Definition: m_io.h:93
enum M_io_type M_io_type_t
Definition: m_io.h:54
struct M_io M_io_t
Definition: m_io.h:59
struct M_io_meta M_io_meta_t
Definition: m_io.h:64
enum M_io_state M_io_state_t
Definition: m_io.h:106