diff --git a/hw/bsp/nordic_pca10095_net/nrf5340_net.ld b/hw/bsp/nordic_pca10095_net/nrf5340_net.ld index 91bfe1c497..b28ab837ff 100644 --- a/hw/bsp/nordic_pca10095_net/nrf5340_net.ld +++ b/hw/bsp/nordic_pca10095_net/nrf5340_net.ld @@ -21,6 +21,8 @@ MEMORY FLASH (rx) : ORIGIN = 0x01008000, LENGTH = 0x30000 RAM (rwx) : ORIGIN = 0x21000000, LENGTH = 0x10000 IPC (rw) : ORIGIN = 0x20000400, LENGTH = 0x400 + sram_ipc0_tx (rw) : ORIGIN = 0x20070000, LENGTH = 0x4000 + sram_ipc0_rx (rw) : ORIGIN = 0x20074000, LENGTH = 0x4000 } /* This linker script is used for images and thus contains an image header */ diff --git a/hw/bsp/nordic_pca10095_net/pkg.yml b/hw/bsp/nordic_pca10095_net/pkg.yml index 8e6ce7dc6f..9d19b8aa40 100644 --- a/hw/bsp/nordic_pca10095_net/pkg.yml +++ b/hw/bsp/nordic_pca10095_net/pkg.yml @@ -34,5 +34,7 @@ pkg.deps: - "@apache-mynewt-core/hw/scripts" - "@apache-mynewt-core/hw/mcu/nordic/nrf5340_net" - "@apache-mynewt-core/libc" + +pkg.deps.'BLE_TRANSPORT_HS != "ipc"': - "@apache-mynewt-core/sys/flash_map" - "@apache-mynewt-core/hw/drivers/flash/ipc_nrf5340_flash" diff --git a/hw/drivers/ipc/icbmsg/include/icbmsg/icbmsg.h b/hw/drivers/ipc/icbmsg/include/icbmsg/icbmsg.h new file mode 100644 index 0000000000..a8fa2fbd7c --- /dev/null +++ b/hw/drivers/ipc/icbmsg/include/icbmsg/icbmsg.h @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +#ifndef _HW_DRIVERS_IPC_ICBMSG_H +#define _HW_DRIVERS_IPC_ICBMSG_H + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +struct ipc_service_cb { + void (*received)(const void *data, size_t len, void *user_data); +}; + +struct ipc_ept_cfg { + const char *name; + struct ipc_service_cb cb; + void *user_data; + uint8_t tx_channel; + uint8_t rx_channel; +}; + +struct ipc_icmsg_buf { + size_t block_id; + uint8_t *data; + uint16_t len; +}; + +typedef void (*ipc_icbmsg_recv_cb)(uint8_t ipc_id, void *user_data); + +uint8_t ipc_icmsg_register_ept(uint8_t ipc_id, struct ipc_ept_cfg *cfg); + +int ipc_icbmsg_send(uint8_t ipc_id, uint8_t ept_addr, const void *data, uint16_t len); + +int ipc_icbmsg_send_buf(uint8_t ipc_id, uint8_t ept_addr, struct ipc_icmsg_buf *buf); + +int ipc_icbmsg_alloc_tx_buf(uint8_t ipc_id, struct ipc_icmsg_buf *buf, uint32_t size); + +uint8_t ipc_icsmsg_ept_ready(uint8_t ipc_id, uint8_t ept_addr); + +#ifdef __cplusplus +} +#endif + +#endif /* _HW_DRIVERS_IPC_ICBMSG_H */ diff --git a/hw/drivers/ipc/icbmsg/pkg.yml b/hw/drivers/ipc/icbmsg/pkg.yml new file mode 100644 index 0000000000..41f0b0a832 --- /dev/null +++ b/hw/drivers/ipc/icbmsg/pkg.yml @@ -0,0 +1,29 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +pkg.name: hw/drivers/ipc/icbmsg +pkg.description: IPC driver with icbmsg backend +pkg.author: "Apache Mynewt " +pkg.homepage: "http://mynewt.apache.org/" +pkg.keywords: + - ipc + +pkg.deps: + - "@apache-mynewt-core/hw/mcu/nordic" + - "@apache-mynewt-core/kernel/os" diff --git a/hw/drivers/ipc/icbmsg/src/icbmsg.c b/hw/drivers/ipc/icbmsg/src/icbmsg.c index 624896d657..5ab8284a22 100644 --- a/hw/drivers/ipc/icbmsg/src/icbmsg.c +++ b/hw/drivers/ipc/icbmsg/src/icbmsg.c @@ -15,6 +15,7 @@ * * Single channel (RX or TX) of the shared memory is divided into two areas: ICMsg area * followed by Blocks area. ICMsg is used to send and receive short 3-byte messages. + * The ICMsg messages are queued inside the ICSM area using PBUF format. * Blocks area is evenly divided into aligned blocks. Blocks are used to allocate * buffers containing actual data. Data buffers can span multiple blocks. The first block * starts with the size of the following data. @@ -75,423 +76,324 @@ * data messages, it calls bound endpoint and it is ready to send data. 
*/ -#undef _POSIX_C_SOURCE -#define _POSIX_C_SOURCE 200809L /* For strnlen() */ -#include +#include +#include +#include +#include +#include +#include "utils.h" +#include "pbuf.h" -#include -#include -#include -#include -#include -#include +#if MYNEWT_VAL(MCU_APP_CORE) +__attribute__((section(".ipc0_tx"))) static uint8_t ipc0_tx_start[0x4000]; +__attribute__((section(".ipc0_rx"))) static uint8_t ipc0_rx_start[0x4000]; +#define TX_BLOCKS_NUM (16) +#define RX_BLOCKS_NUM (24) +#else +__attribute__((section(".ipc0_tx"))) static uint8_t ipc0_rx_start[0x4000]; +__attribute__((section(".ipc0_rx"))) static uint8_t ipc0_tx_start[0x4000]; +#define TX_BLOCKS_NUM (24) +#define RX_BLOCKS_NUM (16) +#endif + +#define ipc0_tx_end (ipc0_tx_start + sizeof(ipc0_tx_start)) +#define ipc0_rx_end (ipc0_rx_start + sizeof(ipc0_rx_start)) -LOG_MODULE_REGISTER(ipc_icbmsg, - CONFIG_IPC_SERVICE_BACKEND_ICBMSG_LOG_LEVEL); +#define tx_BLOCKS_NUM TX_BLOCKS_NUM +#define rx_BLOCKS_NUM RX_BLOCKS_NUM -#define DT_DRV_COMPAT zephyr_ipc_icbmsg +/* A string used to synchronize cores */ +static const uint8_t magic[] = {0x45, 0x6d, 0x31, 0x6c, 0x31, 0x4b, + 0x30, 0x72, 0x6e, 0x33, 0x6c, 0x69, 0x34}; -/** Allowed number of endpoints. */ -#define NUM_EPT CONFIG_IPC_SERVICE_BACKEND_ICBMSG_NUM_EP + +#define PBUF_RX_READ_BUF_SIZE 128 +static uint8_t icmsg_rx_buffer[PBUF_RX_READ_BUF_SIZE] __attribute__((aligned(4))); + +/** Allowed number of endpoints within an IPC instance. */ +#define NUM_EPT MYNEWT_VAL(IPC_ICBMSG_NUM_EP) /** Special endpoint address indicating invalid (or empty) entry. */ #define EPT_ADDR_INVALID 0xFF -/** Special value for empty entry in bound message waiting table. */ -#define WAITING_BOUND_MSG_EMPTY 0xFFFF - /** Size of the header (size field) of the block. */ #define BLOCK_HEADER_SIZE (sizeof(struct block_header)) /** Flag indicating that ICMsg was bounded for this instance. */ -#define CONTROL_BOUNDED BIT(31) +#define CONTROL_BOUNDED (1 << 31) /** Registered endpoints count mask in flags. 
*/ #define FLAG_EPT_COUNT_MASK 0xFFFF -/** Workqueue stack size for bounding processing (this configuration is not optimized). */ -#define EP_BOUND_WORK_Q_STACK_SIZE (512U) - -/** Workqueue priority for bounding processing. */ -#define EP_BOUND_WORK_Q_PRIORITY (CONFIG_SYSTEM_WORKQUEUE_PRIORITY) +enum icmsg_state { + ICMSG_STATE_OFF, + ICMSG_STATE_BUSY, + ICMSG_STATE_READY, +}; enum msg_type { - MSG_DATA = 0, /* Data message. */ - MSG_RELEASE_DATA, /* Release data buffer message. */ - MSG_BOUND, /* Endpoint bounding message. */ - MSG_RELEASE_BOUND, /* Release endpoint bound message. - * This message is also indicator for the receiving side - * that the endpoint bounding was fully processed on - * the sender side. - */ + /* Data message. */ + MSG_DATA = 0, + /* Release data buffer message. */ + MSG_RELEASE_DATA, + /* Endpoint bounding message. */ + MSG_BOUND, + /* Release endpoint bound message. + * This message is also indicator for the receiving side + * that the endpoint bounding was fully processed on + * the sender side. + */ + MSG_RELEASE_BOUND, }; enum ept_bounding_state { - EPT_UNCONFIGURED = 0, /* Endpoint in not configured (initial state). */ - EPT_CONFIGURED, /* Endpoint is configured, waiting for work queue to - * start bounding process. - */ - EPT_BOUNDING, /* Only on initiator. Bound message was send, - * but bound callback was not called yet, because - * we are waiting for any incoming messages. - */ - EPT_READY, /* Bounding is done. Bound callback was called. */ + /* Endpoint in not configured (initial state). */ + EPT_UNCONFIGURED = 0, + /* Endpoint is configured, waiting for work queue to + * start bounding process. + */ + EPT_CONFIGURED, + /* Only on initiator. Bound message was send, + * but bound callback was not called yet, because + * we are waiting for any incoming messages. + */ + EPT_BOUNDING, + /* Bounding is done. Bound callback was called. */ + EPT_READY, }; struct channel_config { - uint8_t *blocks_ptr; /* Address where the blocks start. 
*/ - size_t block_size; /* Size of one block. */ - size_t block_count; /* Number of blocks. */ + /* Address where the blocks start. */ + uint8_t *blocks_ptr; + /* Size of one block. */ + size_t block_size; + /* Number of blocks. */ + size_t block_count; }; -struct icbmsg_config { - struct icmsg_config_t control_config; /* Configuration of the ICMsg. */ - struct channel_config rx; /* RX channel config. */ - struct channel_config tx; /* TX channel config. */ - sys_bitarray_t *tx_usage_bitmap; /* Bit is set when TX block is in use */ - sys_bitarray_t *rx_hold_bitmap; /* Bit is set, if the buffer starting at - * this block should be kept after exit - * from receive handler. - */ -}; +struct ipc_instance; struct ept_data { - const struct ipc_ept_cfg *cfg; /* Endpoint configuration. */ - atomic_t state; /* Bounding state. */ - uint8_t addr; /* Endpoint address. */ + struct ipc_instance *ipc; + struct ipc_ept_cfg *cfg; + /* Bounding state. */ + uint32_t state; + /* Endpoint address. */ + uint8_t addr; }; -struct backend_data { - const struct icbmsg_config *conf;/* Backend instance config. */ - struct icmsg_data_t control_data;/* ICMsg data. */ -#ifdef CONFIG_MULTITHREADING - struct k_mutex mutex; /* Mutex to protect: ICMsg send call and - * waiting_bound field. - */ - struct k_work ep_bound_work; /* Work item for bounding processing. */ - struct k_sem block_wait_sem; /* Semaphore for waiting for free blocks. */ -#endif - struct ept_data ept[NUM_EPT]; /* Array of registered endpoints. */ - uint8_t ept_map[NUM_EPT]; /* Array that maps endpoint address to index. */ - uint16_t waiting_bound[NUM_EPT];/* The bound messages waiting to be registered. */ - atomic_t flags; /* Flags on higher bits, number of registered - * endpoints on lower. - */ - bool is_initiator; /* This side has an initiator role. 
*/ +struct ipc_instance { + /* Bit is set when TX block is in use */ + uint8_t tx_usage_bitmap[DIV_ROUND_UP(TX_BLOCKS_NUM, 8)]; + /* Bit is set, if the buffer starting at + * this block should be kept after exit + * from receive handler. + */ + uint8_t rx_usage_bitmap[DIV_ROUND_UP(RX_BLOCKS_NUM, 8)]; + /* TX ICSMsg Area packet buffer */ + struct pbuf tx_pb; + /* RX ICSMsg Area packet buffer */ + struct pbuf rx_pb; + /* TX channel config. */ + struct channel_config tx; + /* RX channel config. */ + struct channel_config rx; + /* Array of registered endpoints. */ + struct ept_data ept[NUM_EPT]; + /* Flags on higher bits, number of registered + * endpoints on lower. + */ + uint32_t flags; + /* This side has an initiator role. */ + bool is_initiator; + uint8_t state; + uint8_t ipc_id; }; - struct block_header { - volatile size_t size; /* Size of the data field. It must be volatile, because - * when this value is read and validated for security - * reasons, compiler cannot generate code that reads - * it again after validation. - */ + /* Size of the data field. It must be volatile, because + * when this value is read and validated for security + * reasons, compiler cannot generate code that reads + * it again after validation. + */ + volatile size_t size; }; struct block_content { - struct block_header header; - uint8_t data[]; /* Buffer data. */ + struct block_header header; + /* Buffer data. */ + uint8_t data[]; }; struct control_message { - uint8_t msg_type; /* Message type. */ - uint8_t ept_addr; /* Endpoint address or zero for MSG_RELEASE_DATA. */ - uint8_t block_index; /* Block index to send or release. */ + /* Message type. */ + uint8_t msg_type; + /* Endpoint address or zero for MSG_RELEASE_DATA. */ + uint8_t ept_addr; + /* Block index to send or release. */ + uint8_t block_index; }; -BUILD_ASSERT(NUM_EPT <= EPT_ADDR_INVALID, "Too many endpoints"); - -#ifdef CONFIG_MULTITHREADING -/* Work queue for bounding processing. 
*/ -static struct k_work_q ep_bound_work_q; -#endif +static struct ipc_instance ipc_instances[1]; /** * Calculate pointer to block from its index and channel configuration (RX or TX). * No validation is performed. */ -static struct block_content *block_from_index(const struct channel_config *ch_conf, - size_t block_index) +static struct block_content * +block_from_index(const struct channel_config *ch_conf, size_t block_index) { - return (struct block_content *)(ch_conf->blocks_ptr + - block_index * ch_conf->block_size); + return (struct block_content *)(ch_conf->blocks_ptr + block_index * ch_conf->block_size); } /** * Calculate pointer to data buffer from block index and channel configuration (RX or TX). * Also validate the index and optionally the buffer size allocated on the this block. - * - * @param[in] ch_conf The channel - * @param[in] block_index Block index - * @param[out] size Size of the buffer allocated on the block if not NULL. - * The size is also checked if it fits in the blocks area. - * If it is NULL, no size validation is performed. - * @param[in] invalidate_cache If size is not NULL, invalidates cache for entire buffer - * (all blocks). Otherwise, it is ignored. - * @return Pointer to data buffer or NULL if validation failed. 
*/ -static uint8_t *buffer_from_index_validate(const struct channel_config *ch_conf, - size_t block_index, size_t *size, - bool invalidate_cache) +static uint8_t * +buffer_from_index_validate(const struct channel_config *ch_conf, + size_t block_index, size_t *size) { - size_t allocable_size; - size_t buffer_size; - uint8_t *end_ptr; - struct block_content *block; - - if (block_index >= ch_conf->block_count) { - LOG_ERR("Block index invalid"); - return NULL; - } - - block = block_from_index(ch_conf, block_index); - - if (size != NULL) { - if (invalidate_cache) { - sys_cache_data_invd_range(block, BLOCK_HEADER_SIZE); - __sync_synchronize(); - } - allocable_size = ch_conf->block_count * ch_conf->block_size; - end_ptr = ch_conf->blocks_ptr + allocable_size; - buffer_size = block->header.size; - - if ((buffer_size > allocable_size - BLOCK_HEADER_SIZE) || - (&block->data[buffer_size] > end_ptr)) { - LOG_ERR("Block corrupted"); - return NULL; - } - - *size = buffer_size; - if (invalidate_cache) { - sys_cache_data_invd_range(block->data, buffer_size); - __sync_synchronize(); - } - } - - return block->data; + size_t allocable_size; + size_t buffer_size; + uint8_t *end_ptr; + struct block_content *block; + + if (block_index >= ch_conf->block_count) { + /* Block index invalid */ + return NULL; + } + + block = block_from_index(ch_conf, block_index); + + if (size != NULL) { + allocable_size = ch_conf->block_count * ch_conf->block_size; + end_ptr = ch_conf->blocks_ptr + allocable_size; + buffer_size = block->header.size; + + if ((buffer_size > allocable_size - BLOCK_HEADER_SIZE) || + (&block->data[buffer_size] > end_ptr)) { + /* Block corrupted */ + return NULL; + } + + *size = buffer_size; + } + + return block->data; } -/** - * Calculate block index based on data buffer pointer and validate it. - * - * @param[in] ch_conf The channel - * @param[in] buffer Pointer to data buffer - * @param[out] size Size of the allocated buffer if not NULL. 
- * The size is also checked if it fits in the blocks area. - * If it is NULL, no size validation is performed. - * @return Block index or negative error code - * @retval -EINVAL The buffer is not correct - */ -static int buffer_to_index_validate(const struct channel_config *ch_conf, - const uint8_t *buffer, size_t *size) +static int +find_zero_bits(uint8_t bitmap[], size_t total_bits, + size_t n, size_t *start_index) { - size_t block_index; - uint8_t *expected; + size_t zero_count = 0; + size_t first_zero_bit_pos; + size_t bit_id; + size_t byte_id; + uint8_t bit_pos; + + /* Find the first sequence of n consecutive 0 bits */ + for (bit_id = 0; bit_id < total_bits; ++bit_id) { + byte_id = bit_id / 8; + bit_pos = bit_id % 8; + + if ((bitmap[byte_id] & (1 << bit_pos)) == 0) { + if (zero_count == 0) { + first_zero_bit_pos = bit_id; + } + zero_count++; + + if (zero_count == n) { + *start_index = first_zero_bit_pos; + return 0; + } + } else { + zero_count = 0; + } + } + + return -1; +} - block_index = (buffer - ch_conf->blocks_ptr) / ch_conf->block_size; +static void +alloc_bitmap_bits(uint8_t bitmap[], size_t n, size_t start_index) +{ + for (size_t i = 0; i < n; ++i) { + size_t bit_index = start_index + i; + size_t byte_index = bit_index / 8; + size_t bit_pos = bit_index % 8; - expected = buffer_from_index_validate(ch_conf, block_index, size, false); + bitmap[byte_index] |= (1 << bit_pos); + } +} - if (expected == NULL || expected != buffer) { - LOG_ERR("Pointer invalid"); - return -EINVAL; - } +static void +free_bitmap_bits(uint8_t bitmap[], size_t n, size_t start_index) +{ + for (size_t i = 0; i < n; ++i) { + size_t bit_index = start_index + i; + size_t byte_index = bit_index / 8; + size_t bit_pos = bit_index % 8; - return block_index; + bitmap[byte_index] &= ~(1 << bit_pos); + } } -/** - * Allocate buffer for transmission - * - * @param[in,out] size Required size of the buffer. If zero, first available block is - * allocated and all subsequent available blocks. 
Size actually - * allocated which is not less than requested. - * @param[out] buffer Allocated buffer data. - * @param[in] timeout Timeout. - * - * @return Positive index of the first allocated block or negative error. - * @retval -EINVAL If requested size is bigger than entire allocable space. - * @retval -ENOSPC If timeout was K_NO_WAIT and there was not enough space. - * @retval -EAGAIN If timeout occurred. - */ -static int alloc_tx_buffer(struct backend_data *dev_data, uint32_t *size, - uint8_t **buffer, k_timeout_t timeout) +static int +alloc_tx_buffer(struct ipc_instance *ipc, uint32_t size, uint8_t **buffer, + size_t *tx_block_index) { - const struct icbmsg_config *conf = dev_data->conf; - size_t total_size = *size + BLOCK_HEADER_SIZE; - size_t num_blocks = DIV_ROUND_UP(total_size, conf->tx.block_size); - struct block_content *block; -#ifdef CONFIG_MULTITHREADING - bool sem_taken = false; -#endif - size_t tx_block_index; - size_t next_bit; - int prev_bit_val; - int r; - -#ifdef CONFIG_MULTITHREADING - do { - /* Try to allocate specified number of blocks. */ - r = sys_bitarray_alloc(conf->tx_usage_bitmap, num_blocks, - &tx_block_index); - if (r == -ENOSPC && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { - /* Wait for releasing if there is no enough space and exit loop - * on timeout. - */ - r = k_sem_take(&dev_data->block_wait_sem, timeout); - if (r < 0) { - break; - } - sem_taken = true; - } else { - /* Exit loop if space was allocated or other error occurred. */ - break; - } - } while (true); - - /* If semaphore was taken, give it back because this thread does not - * necessary took all available space, so other thread may need it. - */ - if (sem_taken) { - k_sem_give(&dev_data->block_wait_sem); - } -#else - /* Try to allocate specified number of blocks. 
*/ - r = sys_bitarray_alloc(conf->tx_usage_bitmap, num_blocks, &tx_block_index); -#endif - - if (r < 0) { - if (r != -ENOSPC && r != -EAGAIN) { - LOG_ERR("Failed to allocate buffer, err: %d", r); - /* Only -EINVAL is allowed in this place. Any other code - * indicates something wrong with the logic. - */ - __ASSERT_NO_MSG(r == -EINVAL); - } - - if (r == -ENOSPC || r == -EINVAL) { - /* IPC service require -ENOMEM error in case of no memory. */ - r = -ENOMEM; - } - return r; - } - - /* If size is 0 try to allocate more blocks after already allocated. */ - if (*size == 0) { - prev_bit_val = 0; - for (next_bit = tx_block_index + 1; next_bit < conf->tx.block_count; - next_bit++) { - r = sys_bitarray_test_and_set_bit(conf->tx_usage_bitmap, next_bit, - &prev_bit_val); - /** Setting bit should always success. */ - __ASSERT_NO_MSG(r == 0); - if (prev_bit_val) { - break; - } - } - num_blocks = next_bit - tx_block_index; - } - - /* Get block pointer and adjust size to actually allocated space. */ - *size = conf->tx.block_size * num_blocks - BLOCK_HEADER_SIZE; - block = block_from_index(&conf->tx, tx_block_index); - block->header.size = *size; - *buffer = block->data; - return tx_block_index; + int rc; + struct block_content *block; + size_t total_bits = sizeof(ipc->tx_usage_bitmap) * 8; + size_t total_size = size + BLOCK_HEADER_SIZE; + size_t num_blocks = DIV_ROUND_UP(total_size, ipc->tx.block_size); + + rc = find_zero_bits(ipc->tx_usage_bitmap, total_bits, + num_blocks, tx_block_index); + if (rc) { + return rc; + } + + alloc_bitmap_bits(ipc->tx_usage_bitmap, num_blocks, *tx_block_index); + + /* Get block pointer and adjust size to actually allocated space. */ + size = ipc->tx.block_size * num_blocks - BLOCK_HEADER_SIZE; + block = block_from_index(&ipc->tx, *tx_block_index); + block->header.size = size; + *buffer = block->data; + + return 0; } /** - * Release all or part of the blocks occupied by the buffer. 
- * - * @param[in] tx_block_index First block index to release, no validation is performed, - * so caller is responsible for passing valid index. - * @param[in] size Size of data buffer, no validation is performed, - * so caller is responsible for passing valid size. - * @param[in] new_size If less than zero, release all blocks, otherwise reduce - * size to this value and update size in block header. - * - * @returns Positive block index where the buffer starts or negative error. - * @retval -EINVAL If invalid buffer was provided or size is greater than already - * allocated size. + * Allocate buffer for transmission. */ -static int release_tx_blocks(struct backend_data *dev_data, size_t tx_block_index, - size_t size, int new_size) +int +ipc_icbmsg_alloc_tx_buf(uint8_t ipc_id, struct ipc_icmsg_buf *buf, uint32_t size) { - const struct icbmsg_config *conf = dev_data->conf; - struct block_content *block; - size_t num_blocks; - size_t total_size; - size_t new_total_size; - size_t new_num_blocks; - size_t release_index; - int r; - - /* Calculate number of blocks. */ - total_size = size + BLOCK_HEADER_SIZE; - num_blocks = DIV_ROUND_UP(total_size, conf->tx.block_size); - - if (new_size >= 0) { - /* Calculate and validate new values. */ - new_total_size = new_size + BLOCK_HEADER_SIZE; - new_num_blocks = DIV_ROUND_UP(new_total_size, conf->tx.block_size); - if (new_num_blocks > num_blocks) { - LOG_ERR("Requested %d blocks, allocated %d", new_num_blocks, - num_blocks); - return -EINVAL; - } - /* Update actual buffer size and number of blocks to release. */ - block = block_from_index(&conf->tx, tx_block_index); - block->header.size = new_size; - release_index = tx_block_index + new_num_blocks; - num_blocks = num_blocks - new_num_blocks; - } else { - /* If size is negative, release all blocks. */ - release_index = tx_block_index; - } - - if (num_blocks > 0) { - /* Free bits in the bitmap. 
*/ - r = sys_bitarray_free(conf->tx_usage_bitmap, num_blocks, - release_index); - if (r < 0) { - LOG_ERR("Cannot free bits, err %d", r); - return r; - } - -#ifdef CONFIG_MULTITHREADING - /* Wake up all waiting threads. */ - k_sem_give(&dev_data->block_wait_sem); -#endif - } + struct ipc_instance *ipc = &ipc_instances[ipc_id]; - return tx_block_index; + return alloc_tx_buffer(ipc, size, &buf->data, &buf->block_id); } /** * Release all or part of the blocks occupied by the buffer. - * - * @param[in] buffer Buffer to release. - * @param[in] new_size If less than zero, release all blocks, otherwise reduce size to - * this value and update size in block header. - * - * @returns Positive block index where the buffer starts or negative error. - * @retval -EINVAL If invalid buffer was provided or size is greater than already - * allocated size. */ -static int release_tx_buffer(struct backend_data *dev_data, const uint8_t *buffer, - int new_size) +static void +release_tx_blocks(struct ipc_instance *ipc, size_t release_index, size_t size) { - const struct icbmsg_config *conf = dev_data->conf; - size_t size = 0; - int tx_block_index; + size_t num_blocks; + size_t total_size; - tx_block_index = buffer_to_index_validate(&conf->tx, buffer, &size); - if (tx_block_index < 0) { - return tx_block_index; - } + /* Calculate number of blocks. */ + total_size = size + BLOCK_HEADER_SIZE; + num_blocks = DIV_ROUND_UP(total_size, ipc->tx.block_size); - return release_tx_blocks(dev_data, tx_block_index, size, new_size); + if (num_blocks > 0) { + free_bitmap_bits(ipc->tx_usage_bitmap, num_blocks, release_index); + } } /** @@ -499,747 +401,394 @@ static int release_tx_buffer(struct backend_data *dev_data, const uint8_t *buffe * ICMsg may return error on concurrent invocations even when there is enough space * in queue. 
*/ -static int send_control_message(struct backend_data *dev_data, enum msg_type msg_type, - uint8_t ept_addr, uint8_t block_index) +static int +send_control_message(struct ept_data *ept, enum msg_type msg_type, uint8_t block_index) { - const struct icbmsg_config *conf = dev_data->conf; - const struct control_message message = { - .msg_type = (uint8_t)msg_type, - .ept_addr = ept_addr, - .block_index = block_index, - }; - int r; - -#ifdef CONFIG_MULTITHREADING - k_mutex_lock(&dev_data->mutex, K_FOREVER); -#endif - r = icmsg_send(&conf->control_config, &dev_data->control_data, &message, - sizeof(message)); -#ifdef CONFIG_MULTITHREADING - k_mutex_unlock(&dev_data->mutex); -#endif - if (r < sizeof(message)) { - LOG_ERR("Cannot send over ICMsg, err %d", r); - } - return r; -} + int ret; + struct ipc_instance *ipc = ept->ipc; -/** - * Release received buffer. This function will just send release control message. - * - * @param[in] buffer Buffer to release. - * @param[in] msg_type Message type: MSG_RELEASE_BOUND or MSG_RELEASE_DATA. - * @param[in] ept_addr Endpoint address or zero for MSG_RELEASE_DATA. - * - * @return zero or ICMsg send error. 
- */ -static int send_release(struct backend_data *dev_data, const uint8_t *buffer, - enum msg_type msg_type, uint8_t ept_addr) -{ - const struct icbmsg_config *conf = dev_data->conf; - int rx_block_index; + const struct control_message message = { + .msg_type = (uint8_t)msg_type, + .ept_addr = ept->addr, + .block_index = block_index, + }; + + if (ipc->state != ICMSG_STATE_READY) { + return -EBUSY; + } + + ret = pbuf_write(&ipc->tx_pb, (const char *)&message, sizeof(message)); - rx_block_index = buffer_to_index_validate(&conf->rx, buffer, NULL); - if (rx_block_index < 0) { - return rx_block_index; - } + if (ret < 0) { + return ret; + } else if (ret < sizeof(message)) { + return -EBADMSG; + } - return send_control_message(dev_data, msg_type, ept_addr, rx_block_index); + ipc_signal(ept->cfg->tx_channel); + + return 0; } /** - * Send data contained in specified block. It will adjust data size and flush cache - * if necessary. If sending failed, allocated blocks will be released. - * - * @param[in] msg_type Message type: MSG_BOUND or MSG_DATA. - * @param[in] ept_addr Endpoints address. - * @param[in] tx_block_index Index of first block containing data, it is not validated, - * so caller is responsible for passing only valid index. - * @param[in] size Actual size of the data, can be smaller than allocated, - * but it cannot change number of required blocks. - * - * @return number of bytes sent in the message or negative error code. + * Send data contained in specified block. 
*/ -static int send_block(struct backend_data *dev_data, enum msg_type msg_type, - uint8_t ept_addr, size_t tx_block_index, size_t size) +static int +send_block(struct ept_data *ept, enum msg_type msg_type, size_t tx_block_index, size_t size) { - struct block_content *block; - int r; - - block = block_from_index(&dev_data->conf->tx, tx_block_index); + int r; + struct ipc_instance *ipc = ept->ipc; + struct block_content *block; - block->header.size = size; - __sync_synchronize(); - sys_cache_data_flush_range(block, size + BLOCK_HEADER_SIZE); + block = block_from_index(&ipc->tx, tx_block_index); + block->header.size = size; - r = send_control_message(dev_data, msg_type, ept_addr, tx_block_index); - if (r < 0) { - release_tx_blocks(dev_data, tx_block_index, size, -1); - } + r = send_control_message(ept, msg_type, tx_block_index); + if (r < 0) { + release_tx_blocks(ipc, tx_block_index, size); + } - return r; + return r; } /** * Find endpoint that was registered with name that matches name * contained in the endpoint bound message received from remote. - * - * @param[in] name Endpoint name, it must be in a received block. - * - * @return Found endpoint index or -ENOENT if not found. */ -static int find_ept_by_name(struct backend_data *dev_data, const char *name) +static int +find_ept_by_name(struct ipc_instance *ipc, const char *name) { - const struct channel_config *rx_conf = &dev_data->conf->rx; - const char *buffer_end = (const char *)rx_conf->blocks_ptr + - rx_conf->block_count * rx_conf->block_size; - struct ept_data *ept; - size_t name_size; - size_t i; - - /* Requested name is in shared memory, so we have to assume that it - * can be corrupted. Extra care must be taken to avoid out of - * bounds reads. 
- */ - name_size = strnlen(name, buffer_end - name - 1) + 1; - - for (i = 0; i < NUM_EPT; i++) { - ept = &dev_data->ept[i]; - if (atomic_get(&ept->state) == EPT_CONFIGURED && - strncmp(ept->cfg->name, name, name_size) == 0) { - return i; - } - } - - return -ENOENT; -} - -/** - * Find registered endpoint that matches given "bound endpoint" message. When found, - * the "release bound endpoint" message is send. - * - * @param[in] rx_block_index Block containing the "bound endpoint" message. - * @param[in] ept_addr Endpoint address. - * - * @return negative error code or non-negative search result. - * @retval 0 match not found. - * @retval 1 match found and processing was successful. - */ -static int match_bound_msg(struct backend_data *dev_data, size_t rx_block_index, - uint8_t ept_addr) -{ - const struct icbmsg_config *conf = dev_data->conf; - struct block_content *block; - uint8_t *buffer; - int ept_index; - struct ept_data *ept; - int r; - bool valid_state; - - /* Find endpoint that matches requested name. */ - block = block_from_index(&conf->rx, rx_block_index); - buffer = block->data; - ept_index = find_ept_by_name(dev_data, buffer); - if (ept_index < 0) { - return 0; - } - - /* Set endpoint address and mapping. Move it to "ready" state. */ - ept = &dev_data->ept[ept_index]; - ept->addr = ept_addr; - dev_data->ept_map[ept->addr] = ept_index; - valid_state = atomic_cas(&ept->state, EPT_CONFIGURED, EPT_READY); - if (!valid_state) { - LOG_ERR("Unexpected bounding from remote on endpoint %d", ept_addr); - return -EINVAL; - } - - /* Endpoint is ready to send messages, so call bound callback. */ - if (ept->cfg->cb.bound != NULL) { - ept->cfg->cb.bound(ept->cfg->priv); - } - - /* Release the bound message and inform remote that we are ready to receive. 
*/ - r = send_release(dev_data, buffer, MSG_RELEASE_BOUND, ept_addr); - if (r < 0) { - return r; - } - - return 1; + const struct channel_config *rx_conf = &ipc->rx; + const char *buffer_end = (const char *)rx_conf->blocks_ptr + + rx_conf->block_count * rx_conf->block_size; + struct ept_data *ept; + size_t name_size; + size_t i; + + /* Requested name is in shared memory, so we have to assume that it + * can be corrupted. Extra care must be taken to avoid out of + * bounds reads. + */ + name_size = strnlen(name, buffer_end - name - 1) + 1; + + for (i = 0; i < NUM_EPT; i++) { + ept = &ipc->ept[i]; + if (ept->state == EPT_CONFIGURED && + strncmp(ept->cfg->name, name, name_size) == 0) { + return i; + } + } + + return -ENOENT; } /** * Send bound message on specified endpoint. - * - * @param[in] ept Endpoint to use. - * - * @return non-negative value in case of success or negative error code. */ -static int send_bound_message(struct backend_data *dev_data, struct ept_data *ept) +static int +send_bound_message(struct ept_data *ept) { - size_t msg_len; - uint32_t alloc_size; - uint8_t *buffer; - int r; - - msg_len = strlen(ept->cfg->name) + 1; - alloc_size = msg_len; - r = alloc_tx_buffer(dev_data, &alloc_size, &buffer, K_FOREVER); - if (r >= 0) { - strcpy(buffer, ept->cfg->name); - r = send_block(dev_data, MSG_BOUND, ept->addr, r, msg_len); - } - - return r; + int rc; + size_t msg_len; + uint8_t *buffer; + size_t tx_block_index; + + msg_len = strlen(ept->cfg->name) + 1; + rc = alloc_tx_buffer(ept->ipc, msg_len, &buffer, &tx_block_index); + if (rc >= 0) { + strcpy((char *)buffer, ept->cfg->name); + rc = send_block(ept, MSG_BOUND, tx_block_index, msg_len); + } + + return rc; } -#ifdef CONFIG_MULTITHREADING /** - * Put endpoint bound processing into system workqueue. + * Get endpoint from endpoint address. Also validates if the address is correct and + * endpoint is in correct state for receiving. 
*/ -static void schedule_ept_bound_process(struct backend_data *dev_data) +static struct ept_data * +get_ept_and_rx_validate(struct ipc_instance *ipc, uint8_t ept_addr) { - k_work_submit_to_queue(&ep_bound_work_q, &dev_data->ep_bound_work); -} -#endif + struct ept_data *ept; -/** - * Work handler that is responsible to start bounding when ICMsg is bound. - */ -#ifdef CONFIG_MULTITHREADING -static void ept_bound_process(struct k_work *item) -#else -static void ept_bound_process(struct backend_data *dev_data) -#endif -{ -#ifdef CONFIG_MULTITHREADING - struct backend_data *dev_data = CONTAINER_OF(item, struct backend_data, - ep_bound_work); -#endif - struct ept_data *ept = NULL; - size_t i; - int r = 0; - bool matching_state; - - /* Skip processing if ICMsg was not bounded yet. */ - if (!(atomic_get(&dev_data->flags) & CONTROL_BOUNDED)) { - return; - } - - if (dev_data->is_initiator) { - /* Initiator just sends bound message after endpoint was registered. */ - for (i = 0; i < NUM_EPT; i++) { - ept = &dev_data->ept[i]; - matching_state = atomic_cas(&ept->state, EPT_CONFIGURED, - EPT_BOUNDING); - if (matching_state) { - r = send_bound_message(dev_data, ept); - if (r < 0) { - atomic_set(&ept->state, EPT_UNCONFIGURED); - LOG_ERR("Failed to send bound, err %d", r); - } - } - } - } else { - /* Walk over all waiting bound messages and match to local endpoints. 
*/ -#ifdef CONFIG_MULTITHREADING - k_mutex_lock(&dev_data->mutex, K_FOREVER); -#endif - for (i = 0; i < NUM_EPT; i++) { - if (dev_data->waiting_bound[i] != WAITING_BOUND_MSG_EMPTY) { -#ifdef CONFIG_MULTITHREADING - k_mutex_unlock(&dev_data->mutex); -#endif - r = match_bound_msg(dev_data, - dev_data->waiting_bound[i], i); -#ifdef CONFIG_MULTITHREADING - k_mutex_lock(&dev_data->mutex, K_FOREVER); -#endif - if (r != 0) { - dev_data->waiting_bound[i] = - WAITING_BOUND_MSG_EMPTY; - if (r < 0) { - LOG_ERR("Failed bound, err %d", r); - } - } - } - } -#ifdef CONFIG_MULTITHREADING - k_mutex_unlock(&dev_data->mutex); -#endif - } -} + if (ept_addr >= NUM_EPT) { + return NULL; + } -/** - * Get endpoint from endpoint address. Also validates if the address is correct and - * endpoint is in correct state for receiving. If bounding callback was not called yet, - * then call it. - */ -static struct ept_data *get_ept_and_rx_validate(struct backend_data *dev_data, - uint8_t ept_addr) -{ - struct ept_data *ept; - enum ept_bounding_state state; - - if (ept_addr >= NUM_EPT || dev_data->ept_map[ept_addr] >= NUM_EPT) { - LOG_ERR("Received invalid endpoint addr %d", ept_addr); - return NULL; - } - - ept = &dev_data->ept[dev_data->ept_map[ept_addr]]; - - state = atomic_get(&ept->state); - - if (state == EPT_READY) { - /* Valid state - nothing to do. */ - } else if (state == EPT_BOUNDING) { - /* Endpoint bound callback was not called yet - call it. */ - atomic_set(&ept->state, EPT_READY); - if (ept->cfg->cb.bound != NULL) { - ept->cfg->cb.bound(ept->cfg->priv); - } - } else { - LOG_ERR("Invalid state %d of receiving endpoint %d", state, ept->addr); - return NULL; - } - - return ept; -} + ept = &ipc->ept[ept_addr]; -/** - * Data message received. - */ -static int received_data(struct backend_data *dev_data, size_t rx_block_index, - uint8_t ept_addr) -{ - const struct icbmsg_config *conf = dev_data->conf; - uint8_t *buffer; - struct ept_data *ept; - size_t size; - int bit_val; - - /* Validate. 
*/ - buffer = buffer_from_index_validate(&conf->rx, rx_block_index, &size, true); - ept = get_ept_and_rx_validate(dev_data, ept_addr); - if (buffer == NULL || ept == NULL) { - LOG_ERR("Received invalid block index %d or addr %d", rx_block_index, - ept_addr); - return -EINVAL; - } - - /* Clear bit. If cleared, specific block will not be hold after the callback. */ - sys_bitarray_clear_bit(conf->rx_hold_bitmap, rx_block_index); - - /* Call the endpoint callback. It can set the hold bit. */ - ept->cfg->cb.received(buffer, size, ept->cfg->priv); - - /* If the bit is still cleared, request release of the buffer. */ - sys_bitarray_test_bit(conf->rx_hold_bitmap, rx_block_index, &bit_val); - if (!bit_val) { - send_release(dev_data, buffer, MSG_RELEASE_DATA, 0); - } - - return 0; -} + if (ept->state == EPT_READY) { + /* Valid state - nothing to do. */ + } else if (ept->state == EPT_BOUNDING) { + /* The remote endpoint is ready */ + ept->state = EPT_READY; + } else { + return NULL; + } -/** - * Release data message received. - */ -static int received_release_data(struct backend_data *dev_data, size_t tx_block_index) -{ - const struct icbmsg_config *conf = dev_data->conf; - uint8_t *buffer; - size_t size; - int r; - - /* Validate. */ - buffer = buffer_from_index_validate(&conf->tx, tx_block_index, &size, false); - if (buffer == NULL) { - LOG_ERR("Received invalid block index %d", tx_block_index); - return -EINVAL; - } - - /* Release. */ - r = release_tx_blocks(dev_data, tx_block_index, size, -1); - if (r < 0) { - return r; - } - - return r; + return ept; } /** - * Bound endpoint message received. + * Data message received. 
*/ -static int received_bound(struct backend_data *dev_data, size_t rx_block_index, - uint8_t ept_addr) +static int +received_data(struct ipc_instance *ipc, size_t rx_block_index, uint8_t ept_addr) { - const struct icbmsg_config *conf = dev_data->conf; - size_t size; - uint8_t *buffer; - - /* Validate */ - buffer = buffer_from_index_validate(&conf->rx, rx_block_index, &size, true); - if (buffer == NULL) { - LOG_ERR("Received invalid block index %d", rx_block_index); - return -EINVAL; - } - - /* Put message to waiting array. */ -#ifdef CONFIG_MULTITHREADING - k_mutex_lock(&dev_data->mutex, K_FOREVER); -#endif - dev_data->waiting_bound[ept_addr] = rx_block_index; -#ifdef CONFIG_MULTITHREADING - k_mutex_unlock(&dev_data->mutex); -#endif + uint8_t *buffer; + struct ept_data *ept; + size_t size; -#ifdef CONFIG_MULTITHREADING - /* Schedule processing the message. */ - schedule_ept_bound_process(dev_data); -#else - ept_bound_process(dev_data); -#endif + /* Validate. */ + buffer = buffer_from_index_validate(&ipc->rx, rx_block_index, &size); + ept = get_ept_and_rx_validate(ipc, ept_addr); + if (buffer == NULL || ept == NULL) { + return -EINVAL; + } - return 0; -} + /* Call the endpoint callback. */ + ept->cfg->cb.received(buffer, size, ept->cfg->user_data); -/** - * Callback called by ICMsg that handles message (data or endpoint bound) received - * from the remote. - * - * @param[in] data Message received from the ICMsg. - * @param[in] len Number of bytes of data. - * @param[in] priv Opaque pointer to device instance. - */ -static void control_received(const void *data, size_t len, void *priv) -{ - const struct device *instance = priv; - struct backend_data *dev_data = instance->data; - const struct control_message *message = (const struct control_message *)data; - struct ept_data *ept; - uint8_t ept_addr; - int r = 0; - - /* Allow messages longer than 3 bytes, e.g. for future protocol versions. 
*/ - if (len < sizeof(struct control_message)) { - r = -EINVAL; - goto exit; - } - - ept_addr = message->ept_addr; - if (ept_addr >= NUM_EPT) { - r = -EINVAL; - goto exit; - } - - switch (message->msg_type) { - case MSG_RELEASE_DATA: - r = received_release_data(dev_data, message->block_index); - break; - case MSG_RELEASE_BOUND: - r = received_release_data(dev_data, message->block_index); - if (r >= 0) { - ept = get_ept_and_rx_validate(dev_data, ept_addr); - if (ept == NULL) { - r = -EINVAL; - } - } - break; - case MSG_BOUND: - r = received_bound(dev_data, message->block_index, ept_addr); - break; - case MSG_DATA: - r = received_data(dev_data, message->block_index, ept_addr); - break; - default: - /* Silently ignore other messages types. They can be used in future - * protocol version. - */ - break; - } - -exit: - if (r < 0) { - LOG_ERR("Failed to receive, err %d", r); - } + /* Release the buffer */ + send_control_message(ept, MSG_RELEASE_DATA, rx_block_index); + + return 0; } /** - * Callback called when ICMsg is bound. + * Release data message received. */ -static void control_bound(void *priv) +static int +received_release_data(struct ipc_instance *ipc, size_t tx_block_index) { - const struct device *instance = priv; - struct backend_data *dev_data = instance->data; + size_t size; + uint8_t *buffer; - /* Set flag that ICMsg is bounded and now, endpoint bounding may start. */ - atomic_or(&dev_data->flags, CONTROL_BOUNDED); -#ifdef CONFIG_MULTITHREADING - schedule_ept_bound_process(dev_data); -#else - ept_bound_process(dev_data); -#endif -} + /* Validate. */ + buffer = buffer_from_index_validate(&ipc->tx, tx_block_index, &size); + if (buffer == NULL) { + return -EINVAL; + } -/** - * Open the backend instance callback. 
- */ -static int open(const struct device *instance) -{ - const struct icbmsg_config *conf = instance->config; - struct backend_data *dev_data = instance->data; - - static const struct ipc_service_cb cb = { - .bound = control_bound, - .received = control_received, - .error = NULL, - }; - - LOG_DBG("Open instance 0x%08X, initiator=%d", (uint32_t)instance, - dev_data->is_initiator ? 1 : 0); - LOG_DBG(" TX %d blocks of %d bytes at 0x%08X, max allocable %d bytes", - (uint32_t)conf->tx.block_count, - (uint32_t)conf->tx.block_size, - (uint32_t)conf->tx.blocks_ptr, - (uint32_t)(conf->tx.block_size * conf->tx.block_count - - BLOCK_HEADER_SIZE)); - LOG_DBG(" RX %d blocks of %d bytes at 0x%08X, max allocable %d bytes", - (uint32_t)conf->rx.block_count, - (uint32_t)conf->rx.block_size, - (uint32_t)conf->rx.blocks_ptr, - (uint32_t)(conf->rx.block_size * conf->rx.block_count - - BLOCK_HEADER_SIZE)); - - return icmsg_open(&conf->control_config, &dev_data->control_data, &cb, - (void *)instance); -} + /* Release. */ + release_tx_blocks(ipc, tx_block_index, size); -/** - * Endpoint send callback function (with copy). - */ -static int send(const struct device *instance, void *token, const void *msg, size_t len) -{ - struct backend_data *dev_data = instance->data; - struct ept_data *ept = token; - uint32_t alloc_size; - uint8_t *buffer; - int r; - - /* Allocate the buffer. */ - alloc_size = len; - r = alloc_tx_buffer(dev_data, &alloc_size, &buffer, K_NO_WAIT); - if (r < 0) { - return r; - } - - /* Copy data to allocated buffer. */ - memcpy(buffer, msg, len); - - /* Send data message. */ - r = send_block(dev_data, MSG_DATA, ept->addr, r, len); - if (r < 0) { - return r; - } - - return len; + return 0; } /** - * Backend endpoint registration callback. + * Bound endpoint message received. 
*/ -static int register_ept(const struct device *instance, void **token, - const struct ipc_ept_cfg *cfg) +static int +received_bound(struct ipc_instance *ipc, size_t rx_block_index, uint8_t rem_ept_addr) { - struct backend_data *dev_data = instance->data; - struct ept_data *ept = NULL; - int ept_index; - int r = 0; - - /* Reserve new endpoint index. */ - ept_index = atomic_inc(&dev_data->flags) & FLAG_EPT_COUNT_MASK; - if (ept_index >= NUM_EPT) { - LOG_ERR("Too many endpoints"); - __ASSERT_NO_MSG(false); - return -ENOMEM; - } - - /* Add new endpoint. */ - ept = &dev_data->ept[ept_index]; - ept->cfg = cfg; - if (dev_data->is_initiator) { - ept->addr = ept_index; - dev_data->ept_map[ept->addr] = ept->addr; - } - atomic_set(&ept->state, EPT_CONFIGURED); - - /* Keep endpoint address in token. */ - *token = ept; - -#ifdef CONFIG_MULTITHREADING - /* Rest of the bounding will be done in the system workqueue. */ - schedule_ept_bound_process(dev_data); -#else - ept_bound_process(dev_data); -#endif + struct ept_data *ept; + uint8_t *buffer; + size_t size; + uint8_t ept_addr; - return r; -} + /* Validate */ + buffer = buffer_from_index_validate(&ipc->rx, rx_block_index, &size); + if (buffer == NULL) { + /* Received invalid block index */ + return -1; + } -/** - * Returns maximum TX buffer size. - */ -static int get_tx_buffer_size(const struct device *instance, void *token) -{ - const struct icbmsg_config *conf = instance->config; + ept_addr = find_ept_by_name(ipc, (const char *)buffer); + if (ept_addr < 0) { + return 0; + } - return conf->tx.block_size * conf->tx.block_count - BLOCK_HEADER_SIZE; -} + /* Set the remote endpoint address */ + ept = &ipc->ept[ept_addr]; + ept->addr = rem_ept_addr; -/** - * Endpoint TX buffer allocation callback for nocopy sending. 
- */ -static int get_tx_buffer(const struct device *instance, void *token, void **data, - uint32_t *user_len, k_timeout_t wait) -{ - struct backend_data *dev_data = instance->data; - int r; - - r = alloc_tx_buffer(dev_data, user_len, (uint8_t **)data, wait); - if (r < 0) { - return r; - } - return 0; -} + if (ept->state != EPT_CONFIGURED) { + /* Unexpected bounding from remote on endpoint */ + return -EINVAL; + } -/** - * Endpoint TX buffer release callback for nocopy sending. - */ -static int drop_tx_buffer(const struct device *instance, void *token, const void *data) -{ - struct backend_data *dev_data = instance->data; - int r; + ept->state = EPT_READY; - r = release_tx_buffer(dev_data, data, -1); - if (r < 0) { - return r; - } + send_control_message(ept, MSG_RELEASE_BOUND, rx_block_index); - return 0; + return 0; } /** - * Endpoint nocopy sending. + * Handles ICMsg control messages received from the remote. */ -static int send_nocopy(const struct device *instance, void *token, const void *data, - size_t len) +static void +control_received(struct ipc_instance *ipc, const struct control_message *message) { - struct backend_data *dev_data = instance->data; - struct ept_data *ept = token; - int r; - - /* Actual size may be smaller than requested, so shrink if possible. 
*/ - r = release_tx_buffer(dev_data, data, len); - if (r < 0) { - release_tx_buffer(dev_data, data, -1); - return r; - } - - return send_block(dev_data, MSG_DATA, ept->addr, r, len); + uint8_t ept_addr; + + ept_addr = message->ept_addr; + if (ept_addr >= NUM_EPT) { + return; + } + + switch (message->msg_type) { + case MSG_RELEASE_DATA: + received_release_data(ipc, message->block_index); + break; + case MSG_RELEASE_BOUND: + assert(received_release_data(ipc, message->block_index) == 0); + assert(get_ept_and_rx_validate(ipc, ept_addr) != NULL); + break; + case MSG_BOUND: + received_bound(ipc, message->block_index, ept_addr); + break; + case MSG_DATA: + received_data(ipc, message->block_index, ept_addr); + break; + default: + /* Silently ignore other messages types. They can be used in future + * protocol version. + */ + break; + } } -/** - * Holding RX buffer for nocopy receiving. - */ -static int hold_rx_buffer(const struct device *instance, void *token, void *data) +void +ipc_process_signal(uint8_t ipc_id) { - const struct icbmsg_config *conf = instance->config; - int rx_block_index; - uint8_t *buffer = data; - - /* Calculate block index and set associated bit. */ - rx_block_index = buffer_to_index_validate(&conf->rx, buffer, NULL); - __ASSERT_NO_MSG(rx_block_index >= 0); - return sys_bitarray_set_bit(conf->rx_hold_bitmap, rx_block_index); + int icmsg_len; + struct ipc_instance *ipc = &ipc_instances[ipc_id]; + + icmsg_len = pbuf_read(&ipc->rx_pb, NULL, 0); + if (icmsg_len <= 0) { + /* Unlikely, no data in buffer. */ + return; + } + + if (sizeof(icmsg_rx_buffer) < icmsg_len) { + return; + } + + icmsg_len = pbuf_read(&ipc->rx_pb, (char *)icmsg_rx_buffer, sizeof(icmsg_rx_buffer)); + + if (ipc->state == ICMSG_STATE_READY) { + if (icmsg_len < sizeof(struct control_message)) { + return; + } + + control_received(ipc, (const struct control_message *)icmsg_rx_buffer); + } else { + /* After core restart, the first message in ICMsg area should be + * the magic string. 
+ */ + assert(ipc->state == ICMSG_STATE_BUSY); + + /* Allow magic number longer than sizeof(magic) for future protocol version. */ + bool endpoint_invalid = (icmsg_len < sizeof(magic) || + memcmp(magic, icmsg_rx_buffer, sizeof(magic))); + + assert(!endpoint_invalid); + + /* Set flag that ICMsg is bounded and now, endpoint bounding may start. */ + ipc->flags |= CONTROL_BOUNDED; + ipc->state = ICMSG_STATE_READY; + } } /** - * Release RX buffer that was previously held. + * Send to endpoint (without copy). */ -static int release_rx_buffer(const struct device *instance, void *token, void *data) +int +ipc_icbmsg_send_buf(uint8_t ipc_id, uint8_t ept_addr, struct ipc_icmsg_buf *buf) { - struct backend_data *dev_data = instance->data; + struct ipc_instance *ipc = &ipc_instances[ipc_id]; + struct ept_data *ept = &ipc->ept[ept_addr]; - return send_release(dev_data, (uint8_t *)data, MSG_RELEASE_DATA, 0); + /* Send data message. */ + return send_block(ept, MSG_DATA, buf->block_id, buf->len); } /** - * Backend device initialization. + * Send to endpoint (with copy). 
*/ -static int backend_init(const struct device *instance) +int +ipc_icbmsg_send(uint8_t ipc_id, uint8_t ept_addr, const void *data, uint16_t len) { - const struct icbmsg_config *conf = instance->config; - struct backend_data *dev_data = instance->data; -#ifdef CONFIG_MULTITHREADING - static K_THREAD_STACK_DEFINE(ep_bound_work_q_stack, EP_BOUND_WORK_Q_STACK_SIZE); - static bool is_work_q_started; - - if (!is_work_q_started) { - k_work_queue_init(&ep_bound_work_q); - k_work_queue_start(&ep_bound_work_q, ep_bound_work_q_stack, - K_THREAD_STACK_SIZEOF(ep_bound_work_q_stack), - EP_BOUND_WORK_Q_PRIORITY, NULL); - - is_work_q_started = true; - } -#endif - - dev_data->conf = conf; - dev_data->is_initiator = (conf->rx.blocks_ptr < conf->tx.blocks_ptr); -#ifdef CONFIG_MULTITHREADING - k_mutex_init(&dev_data->mutex); - k_work_init(&dev_data->ep_bound_work, ept_bound_process); - k_sem_init(&dev_data->block_wait_sem, 0, 1); -#endif - memset(&dev_data->waiting_bound, 0xFF, sizeof(dev_data->waiting_bound)); - memset(&dev_data->ept_map, EPT_ADDR_INVALID, sizeof(dev_data->ept_map)); - return 0; + int rc; + uint8_t *buffer; + struct ipc_instance *ipc = &ipc_instances[ipc_id]; + struct ept_data *ept = &ipc->ept[ept_addr]; + size_t tx_block_index; + + /* Allocate the buffer. */ + rc = alloc_tx_buffer(ipc, len, &buffer, &tx_block_index); + if (rc < 0) { + return rc; + } + + /* Copy data to allocated buffer. */ + memcpy(buffer, data, len); + + /* Send data message. */ + rc = send_block(ept, MSG_DATA, tx_block_index, len); + if (rc < 0) { + return rc; + } + + return 0; } /** - * IPC service backend callbacks. 
+ * Register new endpoint */ -const static struct ipc_service_backend backend_ops = { - .open_instance = open, - .close_instance = NULL, /* not implemented */ - .send = send, - .register_endpoint = register_ept, - .deregister_endpoint = NULL, /* not implemented */ - .get_tx_buffer_size = get_tx_buffer_size, - .get_tx_buffer = get_tx_buffer, - .drop_tx_buffer = drop_tx_buffer, - .send_nocopy = send_nocopy, - .hold_rx_buffer = hold_rx_buffer, - .release_rx_buffer = release_rx_buffer, -}; +uint8_t +ipc_icmsg_register_ept(uint8_t ipc_id, struct ipc_ept_cfg *cfg) +{ + int rc; + struct ipc_instance *ipc; + struct ept_data *ept = NULL; + uint8_t ept_addr; + + assert(ipc_id < sizeof(ipc_instances)); + ipc = &ipc_instances[ipc_id]; + + /* Reserve new endpoint index. */ + ept_addr = (++ipc->flags) & FLAG_EPT_COUNT_MASK; + assert(ept_addr < NUM_EPT); + + /* Add new endpoint. */ + ept = &ipc->ept[ept_addr]; + ept->ipc = ipc; + ept->state = EPT_CONFIGURED; + ept->cfg = cfg; + + if (ipc->is_initiator) { + ept->addr = ept_addr; + ept->state = EPT_BOUNDING; + + rc = send_bound_message(ept); + assert(rc == 0); + } else { + ept->addr = EPT_ADDR_INVALID; + } + + return ept_addr; +} /** * Number of bytes per each ICMsg message. It is used to calculate size of ICMsg area. */ -#define BYTES_PER_ICMSG_MESSAGE (ROUND_UP(sizeof(struct control_message), \ - sizeof(void *)) + PBUF_PACKET_LEN_SZ) +#define BYTES_PER_ICMSG_MESSAGE (ROUND_UP(sizeof(struct control_message), \ + sizeof(void *)) + PBUF_PACKET_LEN_SZ) /** * Maximum ICMsg overhead. It is used to calculate size of ICMsg area. */ -#define ICMSG_BUFFER_OVERHEAD(i) \ - (PBUF_HEADER_OVERHEAD(GET_CACHE_ALIGNMENT(i)) + 2 * BYTES_PER_ICMSG_MESSAGE) +#define ICMSG_BUFFER_OVERHEAD(i) \ + (PBUF_HEADER_OVERHEAD(GET_CACHE_ALIGNMENT(i)) + 2 * BYTES_PER_ICMSG_MESSAGE) /** * Returns required block alignment for instance "i". 
*/ -#define GET_CACHE_ALIGNMENT(i) \ - MAX(sizeof(uint32_t), DT_INST_PROP_OR(i, dcache_alignment, 0)) +#define GET_CACHE_ALIGNMENT(i) (sizeof(uint32_t)) /** * Calculates minimum size required for ICMsg region for specific number of local @@ -1247,138 +796,152 @@ const static struct ipc_service_backend backend_ops = { * because it can hold data message for each local block and release message * for each remote block. */ -#define GET_ICMSG_MIN_SIZE(i, local_blocks, remote_blocks) \ - (ICMSG_BUFFER_OVERHEAD(i) + BYTES_PER_ICMSG_MESSAGE * \ - (local_blocks + remote_blocks)) +#define GET_ICMSG_MIN_SIZE(i, local_blocks, remote_blocks) \ + (ICMSG_BUFFER_OVERHEAD(i) + BYTES_PER_ICMSG_MESSAGE * \ + (local_blocks + remote_blocks)) /** * Calculate aligned block size by evenly dividing remaining space after removing * the space for ICMsg. */ -#define GET_BLOCK_SIZE(i, total_size, local_blocks, remote_blocks) ROUND_DOWN( \ - ((total_size) - GET_ICMSG_MIN_SIZE(i, (local_blocks), (remote_blocks))) / \ - (local_blocks), GET_CACHE_ALIGNMENT(i)) +#define GET_BLOCK_SIZE(i, total_size, local_blocks, remote_blocks) ROUND_DOWN( \ + ((total_size) - GET_ICMSG_MIN_SIZE(i, (local_blocks), (remote_blocks))) / \ + (local_blocks), GET_CACHE_ALIGNMENT(i)) /** * Calculate offset where area for blocks starts which is just after the ICMsg. */ -#define GET_BLOCKS_OFFSET(i, total_size, local_blocks, remote_blocks) \ - ((total_size) - GET_BLOCK_SIZE(i, (total_size), (local_blocks), \ - (remote_blocks)) * (local_blocks)) +#define GET_BLOCKS_OFFSET(i, total_size, local_blocks, remote_blocks) \ + ((total_size) - GET_BLOCK_SIZE(i, (total_size), (local_blocks), \ + (remote_blocks)) * (local_blocks)) /** * Return shared memory start address aligned to block alignment and cache line. 
*/ -#define GET_MEM_ADDR_INST(i, direction) \ - ROUND_UP(DT_REG_ADDR(DT_INST_PHANDLE(i, direction##_region)), \ - GET_CACHE_ALIGNMENT(i)) +#define GET_MEM_ADDR_INST(i, direction) \ + ROUND_UP(ipc ## i ## _ ## direction ## _start, GET_CACHE_ALIGNMENT(i)) /** * Return shared memory end address aligned to block alignment and cache line. */ -#define GET_MEM_END_INST(i, direction) \ - ROUND_DOWN(DT_REG_ADDR(DT_INST_PHANDLE(i, direction##_region)) + \ - DT_REG_SIZE(DT_INST_PHANDLE(i, direction##_region)), \ - GET_CACHE_ALIGNMENT(i)) +#define GET_MEM_END_INST(i, direction) \ + ROUND_DOWN(ipc ## i ## _ ## direction ## _end, GET_CACHE_ALIGNMENT(i)) /** * Return shared memory size aligned to block alignment and cache line. */ -#define GET_MEM_SIZE_INST(i, direction) \ - (GET_MEM_END_INST(i, direction) - GET_MEM_ADDR_INST(i, direction)) +#define GET_MEM_SIZE_INST(i, direction) \ + (GET_MEM_END_INST(i, direction) - GET_MEM_ADDR_INST(i, direction)) /** * Returns GET_ICMSG_SIZE, but for specific instance and direction. * 'loc' and 'rem' parameters tells the direction. They can be either "tx, rx" * or "rx, tx". */ -#define GET_ICMSG_SIZE_INST(i, loc, rem) \ - GET_BLOCKS_OFFSET( \ - i, \ - GET_MEM_SIZE_INST(i, loc), \ - DT_INST_PROP(i, loc##_blocks), \ - DT_INST_PROP(i, rem##_blocks)) +#define GET_ICMSG_SIZE_INST(i, loc, rem) \ + GET_BLOCKS_OFFSET(i, \ + GET_MEM_SIZE_INST(i, loc), \ + loc ## _BLOCKS_NUM, \ + rem ## _BLOCKS_NUM) /** * Returns address where area for blocks starts for specific instance and direction. * 'loc' and 'rem' parameters tells the direction. They can be either "tx, rx" * or "rx, tx". */ -#define GET_BLOCKS_ADDR_INST(i, loc, rem) \ - GET_MEM_ADDR_INST(i, loc) + \ - GET_BLOCKS_OFFSET( \ - i, \ - GET_MEM_SIZE_INST(i, loc), \ - DT_INST_PROP(i, loc##_blocks), \ - DT_INST_PROP(i, rem##_blocks)) +#define GET_BLOCKS_ADDR_INST(i, loc, rem) \ + GET_MEM_ADDR_INST(i, loc) + \ + GET_ICMSG_SIZE_INST(i, loc, rem) /** * Returns block size for specific instance and direction. 
* 'loc' and 'rem' parameters tells the direction. They can be either "tx, rx" * or "rx, tx". */ -#define GET_BLOCK_SIZE_INST(i, loc, rem) \ - GET_BLOCK_SIZE( \ - i, \ - GET_MEM_SIZE_INST(i, loc), \ - DT_INST_PROP(i, loc##_blocks), \ - DT_INST_PROP(i, rem##_blocks)) - -#define DEFINE_BACKEND_DEVICE(i) \ - SYS_BITARRAY_DEFINE_STATIC(tx_usage_bitmap_##i, DT_INST_PROP(i, tx_blocks)); \ - SYS_BITARRAY_DEFINE_STATIC(rx_hold_bitmap_##i, DT_INST_PROP(i, rx_blocks)); \ - PBUF_DEFINE(tx_icbmsg_pb_##i, \ - GET_MEM_ADDR_INST(i, tx), \ - GET_ICMSG_SIZE_INST(i, tx, rx), \ - GET_CACHE_ALIGNMENT(i)); \ - PBUF_DEFINE(rx_icbmsg_pb_##i, \ - GET_MEM_ADDR_INST(i, rx), \ - GET_ICMSG_SIZE_INST(i, rx, tx), \ - GET_CACHE_ALIGNMENT(i)); \ - static struct backend_data backend_data_##i = { \ - .control_data = { \ - .tx_pb = &tx_icbmsg_pb_##i, \ - .rx_pb = &rx_icbmsg_pb_##i, \ - } \ - }; \ - static const struct icbmsg_config backend_config_##i = \ - { \ - .control_config = { \ - .mbox_tx = MBOX_DT_SPEC_INST_GET(i, tx), \ - .mbox_rx = MBOX_DT_SPEC_INST_GET(i, rx), \ - }, \ - .tx = { \ - .blocks_ptr = (uint8_t *)GET_BLOCKS_ADDR_INST(i, tx, rx), \ - .block_count = DT_INST_PROP(i, tx_blocks), \ - .block_size = GET_BLOCK_SIZE_INST(i, tx, rx), \ - }, \ - .rx = { \ - .blocks_ptr = (uint8_t *)GET_BLOCKS_ADDR_INST(i, rx, tx), \ - .block_count = DT_INST_PROP(i, rx_blocks), \ - .block_size = GET_BLOCK_SIZE_INST(i, rx, tx), \ - }, \ - .tx_usage_bitmap = &tx_usage_bitmap_##i, \ - .rx_hold_bitmap = &rx_hold_bitmap_##i, \ - }; \ - BUILD_ASSERT(IS_POWER_OF_TWO(GET_CACHE_ALIGNMENT(i)), \ - "This module supports only power of two cache alignment"); \ - BUILD_ASSERT((GET_BLOCK_SIZE_INST(i, tx, rx) > GET_CACHE_ALIGNMENT(i)) && \ - (GET_BLOCK_SIZE_INST(i, tx, rx) < \ - GET_MEM_SIZE_INST(i, tx)), \ - "TX region is too small for provided number of blocks"); \ - BUILD_ASSERT((GET_BLOCK_SIZE_INST(i, rx, tx) > GET_CACHE_ALIGNMENT(i)) && \ - (GET_BLOCK_SIZE_INST(i, rx, tx) < \ - GET_MEM_SIZE_INST(i, rx)), \ - "RX region 
is too small for provided number of blocks"); \ - BUILD_ASSERT(DT_INST_PROP(i, rx_blocks) <= 256, "Too many RX blocks"); \ - BUILD_ASSERT(DT_INST_PROP(i, tx_blocks) <= 256, "Too many TX blocks"); \ - DEVICE_DT_INST_DEFINE(i, \ - &backend_init, \ - NULL, \ - &backend_data_##i, \ - &backend_config_##i, \ - POST_KERNEL, \ - CONFIG_IPC_SERVICE_REG_BACKEND_PRIORITY, \ - &backend_ops); - -DT_INST_FOREACH_STATUS_OKAY(DEFINE_BACKEND_DEVICE) +#define GET_BLOCK_SIZE_INST(i, loc, rem) \ + GET_BLOCK_SIZE(i, \ + GET_MEM_SIZE_INST(i, loc), \ + loc ## _BLOCKS_NUM, \ + rem ## _BLOCKS_NUM) + +#define IPC_INSTANCE_INIT(i) (struct ipc_instance) { \ + .tx_pb = { \ + .cfg = PBUF_CFG_INIT(GET_MEM_ADDR_INST(i, tx), \ + GET_ICMSG_SIZE_INST(i, tx, rx), \ + GET_CACHE_ALIGNMENT(i)), \ + }, \ + .rx_pb = { \ + .cfg = PBUF_CFG_INIT(GET_MEM_ADDR_INST(i, rx), \ + GET_ICMSG_SIZE_INST(i, rx, tx), \ + GET_CACHE_ALIGNMENT(i)), \ + }, \ + .tx = { \ + .blocks_ptr = (uint8_t *)GET_BLOCKS_ADDR_INST(i, tx, rx), \ + .block_count = TX_BLOCKS_NUM, \ + .block_size = GET_BLOCK_SIZE_INST(i, tx, rx), \ + }, \ + .rx = { \ + .blocks_ptr = (uint8_t *)GET_BLOCKS_ADDR_INST(i, rx, tx), \ + .block_count = RX_BLOCKS_NUM, \ + .block_size = GET_BLOCK_SIZE_INST(i, rx, tx), \ + }, \ +} + +/** + * Backend initialization. + */ +int +ipc_open(uint8_t ipc_id) +{ + int rc; + struct ipc_instance *ipc; + + assert(ipc_id < sizeof(ipc_instances)); + + ipc = &ipc_instances[ipc_id]; + *ipc = IPC_INSTANCE_INIT(0); + + assert(ipc->state == ICMSG_STATE_OFF); + ipc->state = ICMSG_STATE_BUSY; + + memset(ipc->tx_usage_bitmap, 0, DIV_ROUND_UP(TX_BLOCKS_NUM, 8)); + memset(ipc->rx_usage_bitmap, 0, DIV_ROUND_UP(RX_BLOCKS_NUM, 8)); + + ipc->is_initiator = (ipc->rx.blocks_ptr < ipc->tx.blocks_ptr); + + rc = pbuf_init(&ipc->tx_pb); + assert(rc == 0); + + /* Initialize local copies of rx_pb. 
*/ + ipc->rx_pb.data.wr_idx = 0; + ipc->rx_pb.data.rd_idx = 0; + + rc = pbuf_write(&ipc->tx_pb, (char *)magic, sizeof(magic)); + assert(rc == sizeof(magic)); + + return 0; +} + +uint8_t +ipc_ready(uint8_t ipc_id) +{ + struct ipc_instance *ipc = &ipc_instances[ipc_id]; + + if (ipc->state == ICMSG_STATE_READY) { + return 1; + } + + return 0; +} + +uint8_t +ipc_icsmsg_ept_ready(uint8_t ipc_id, uint8_t ept_addr) +{ + struct ipc_instance *ipc = &ipc_instances[ipc_id]; + + if (ipc->ept[ept_addr].state == EPT_READY) { + return 1; + } + + return 0; +} diff --git a/hw/drivers/ipc/icbmsg/src/pbuf.c b/hw/drivers/ipc/icbmsg/src/pbuf.c index 1164f814ca..b3dc52dae7 100644 --- a/hw/drivers/ipc/icbmsg/src/pbuf.c +++ b/hw/drivers/ipc/icbmsg/src/pbuf.c @@ -4,220 +4,202 @@ * SPDX-License-Identifier: Apache-2.0 */ -#include #include #include -#include -#include -#include +#include +#include +#include "pbuf.h" +#include "utils.h" /* Helper funciton for getting numer of bytes being written to the bufer. */ -static uint32_t idx_occupied(uint32_t len, uint32_t wr_idx, uint32_t rd_idx) +static uint32_t +idx_occupied(uint32_t len, uint32_t wr_idx, uint32_t rd_idx) { - /* It is implicitly assumed wr_idx and rd_idx cannot differ by more then len. */ - return (rd_idx > wr_idx) ? (len - (rd_idx - wr_idx)) : (wr_idx - rd_idx); + /* It is implicitly assumed wr_idx and rd_idx cannot differ by more then len. */ + return (rd_idx > wr_idx) ? (len - (rd_idx - wr_idx)) : (wr_idx - rd_idx); } /* Helper function for wrapping the index from the begging if above buffer len. */ -static uint32_t idx_wrap(uint32_t len, uint32_t idx) +static uint32_t +idx_wrap(uint32_t len, uint32_t idx) { - return (idx >= len) ? (idx % len) : (idx); + return (idx >= len) ? (idx % len) : (idx); } -static int validate_cfg(const struct pbuf_cfg *cfg) +static int +validate_cfg(const struct pbuf_cfg *cfg) { - /* Validate pointers. 
*/ - if (!cfg || !cfg->rd_idx_loc || !cfg->wr_idx_loc || !cfg->data_loc) { - return -EINVAL; - } - - /* Validate pointer alignment. */ - if (!IS_PTR_ALIGNED_BYTES(cfg->rd_idx_loc, MAX(cfg->dcache_alignment, _PBUF_IDX_SIZE)) || - !IS_PTR_ALIGNED_BYTES(cfg->wr_idx_loc, MAX(cfg->dcache_alignment, _PBUF_IDX_SIZE)) || - !IS_PTR_ALIGNED_BYTES(cfg->data_loc, _PBUF_IDX_SIZE)) { - return -EINVAL; - } - - /* Validate len. */ - if (cfg->len < _PBUF_MIN_DATA_LEN || !IS_PTR_ALIGNED_BYTES(cfg->len, _PBUF_IDX_SIZE)) { - return -EINVAL; - } - - /* Validate pointer values. */ - if (!(cfg->rd_idx_loc < cfg->wr_idx_loc) || - !((uint8_t *)cfg->wr_idx_loc < cfg->data_loc) || - !(((uint8_t *)cfg->rd_idx_loc + MAX(_PBUF_IDX_SIZE, cfg->dcache_alignment)) == - (uint8_t *)cfg->wr_idx_loc)) { - return -EINVAL; - } - - return 0; + /* Validate pointers. */ + if (!cfg || !cfg->rd_idx_loc || !cfg->wr_idx_loc || !cfg->data_loc) { + return -EINVAL; + } + + /* Validate pointer alignment. */ + if (!IS_PTR_ALIGNED_BYTES(cfg->rd_idx_loc, MAX(cfg->dcache_alignment, _PBUF_IDX_SIZE)) || + !IS_PTR_ALIGNED_BYTES(cfg->wr_idx_loc, MAX(cfg->dcache_alignment, _PBUF_IDX_SIZE)) || + !IS_PTR_ALIGNED_BYTES(cfg->data_loc, _PBUF_IDX_SIZE)) { + return -EINVAL; + } + + /* Validate len. */ + if (cfg->len < _PBUF_MIN_DATA_LEN || !IS_PTR_ALIGNED_BYTES(cfg->len, _PBUF_IDX_SIZE)) { + return -EINVAL; + } + + /* Validate pointer values. */ + if (!(cfg->rd_idx_loc < cfg->wr_idx_loc) || + !((uint8_t *)cfg->wr_idx_loc < cfg->data_loc) || + !(((uint8_t *)cfg->rd_idx_loc + MAX(_PBUF_IDX_SIZE, cfg->dcache_alignment)) == + (uint8_t *)cfg->wr_idx_loc)) { + return -EINVAL; + } + + return 0; } -int pbuf_init(struct pbuf *pb) +int +pbuf_init(struct pbuf *pb) { - if (validate_cfg(pb->cfg) != 0) { - return -EINVAL; - } + if (validate_cfg(&pb->cfg) != 0) { + return -EINVAL; + } - /* Initialize local copy of indexes. */ - pb->data.wr_idx = 0; - pb->data.rd_idx = 0; + /* Initialize local copy of indexes. 
*/ + pb->data.wr_idx = 0; + pb->data.rd_idx = 0; - /* Clear shared memory. */ - *(pb->cfg->wr_idx_loc) = pb->data.wr_idx; - *(pb->cfg->rd_idx_loc) = pb->data.rd_idx; + /* Clear shared memory. */ + *(pb->cfg.wr_idx_loc) = pb->data.wr_idx; + *(pb->cfg.rd_idx_loc) = pb->data.rd_idx; - __sync_synchronize(); - - /* Take care cache. */ - sys_cache_data_flush_range((void *)(pb->cfg->wr_idx_loc), sizeof(*(pb->cfg->wr_idx_loc))); - sys_cache_data_flush_range((void *)(pb->cfg->rd_idx_loc), sizeof(*(pb->cfg->rd_idx_loc))); - - return 0; + return 0; } -int pbuf_write(struct pbuf *pb, const char *data, uint16_t len) +int +pbuf_write(struct pbuf *pb, const char *data, uint16_t len) { - if (pb == NULL || len == 0 || data == NULL) { - /* Incorrect call. */ - return -EINVAL; - } - - /* Invalidate rd_idx only, local wr_idx is used to increase buffer security. */ - sys_cache_data_invd_range((void *)(pb->cfg->rd_idx_loc), sizeof(*(pb->cfg->rd_idx_loc))); - __sync_synchronize(); - - uint8_t *const data_loc = pb->cfg->data_loc; - const uint32_t blen = pb->cfg->len; - uint32_t rd_idx = *(pb->cfg->rd_idx_loc); - uint32_t wr_idx = pb->data.wr_idx; - - /* wr_idx must always be aligned. */ - __ASSERT_NO_MSG(IS_PTR_ALIGNED_BYTES(wr_idx, _PBUF_IDX_SIZE)); - /* rd_idx shall always be aligned, but its value is received from the reader. - * Can not assert. - */ - if (!IS_PTR_ALIGNED_BYTES(rd_idx, _PBUF_IDX_SIZE)) { - return -EINVAL; - } - - uint32_t free_space = blen - idx_occupied(blen, wr_idx, rd_idx) - _PBUF_IDX_SIZE; - - /* Packet length, data + packet length size. */ - uint32_t plen = len + PBUF_PACKET_LEN_SZ; - - /* Check if packet will fit into the buffer. */ - if (free_space < plen) { - return -ENOMEM; - } - - /* Clear packet len with zeros and update. Clearing is done for possible versioning in the - * future. Writing is allowed now, because shared wr_idx value is updated at the very end. 
- */ - *((uint32_t *)(&data_loc[wr_idx])) = 0; - sys_put_be16(len, &data_loc[wr_idx]); - __sync_synchronize(); - sys_cache_data_flush_range(&data_loc[wr_idx], PBUF_PACKET_LEN_SZ); - - wr_idx = idx_wrap(blen, wr_idx + PBUF_PACKET_LEN_SZ); - - /* Write until end of the buffer, if data will be wrapped. */ - uint32_t tail = MIN(len, blen - wr_idx); - - memcpy(&data_loc[wr_idx], data, tail); - sys_cache_data_flush_range(&data_loc[wr_idx], tail); - - if (len > tail) { - /* Copy remaining data to buffer front. */ - memcpy(&data_loc[0], data + tail, len - tail); - sys_cache_data_flush_range(&data_loc[0], len - tail); - } - - wr_idx = idx_wrap(blen, ROUND_UP(wr_idx + len, _PBUF_IDX_SIZE)); - /* Update wr_idx. */ - pb->data.wr_idx = wr_idx; - *(pb->cfg->wr_idx_loc) = wr_idx; - __sync_synchronize(); - sys_cache_data_flush_range((void *)pb->cfg->wr_idx_loc, sizeof(*(pb->cfg->wr_idx_loc))); - - return len; + if (pb == NULL || len == 0 || data == NULL) { + /* Incorrect call. */ + return -EINVAL; + } + + uint8_t *const data_loc = pb->cfg.data_loc; + const uint32_t blen = pb->cfg.len; + uint32_t rd_idx = *(pb->cfg.rd_idx_loc); + uint32_t wr_idx = pb->data.wr_idx; + + /* wr_idx must always be aligned. */ + assert(IS_PTR_ALIGNED_BYTES(wr_idx, _PBUF_IDX_SIZE)); + /* rd_idx shall always be aligned, but its value is received from the reader. + * Can not assert. + */ + if (!IS_PTR_ALIGNED_BYTES(rd_idx, _PBUF_IDX_SIZE)) { + return -EINVAL; + } + + assert(wr_idx < blen); + uint32_t free_space = blen - idx_occupied(blen, wr_idx, rd_idx) - _PBUF_IDX_SIZE; + + /* Packet length, data + packet length size. */ + uint32_t plen = len + PBUF_PACKET_LEN_SZ; + + /* Check if packet will fit into the buffer. */ + if (free_space < plen) { + return -ENOMEM; + } + + /* Clear packet len with zeros and update. Clearing is done for possible versioning in the + * future. Writing is allowed now, because shared wr_idx value is updated at the very end. 
+ */ + *((uint32_t *)(&data_loc[wr_idx])) = 0; + put_be16(&data_loc[wr_idx], len); + + wr_idx = idx_wrap(blen, wr_idx + PBUF_PACKET_LEN_SZ); + + /* Write until end of the buffer, if data will be wrapped. */ + uint32_t tail = MIN(len, blen - wr_idx); + + memcpy(&data_loc[wr_idx], data, tail); + + if (len > tail) { + /* Copy remaining data to buffer front. */ + memcpy(&data_loc[0], data + tail, len - tail); + } + + wr_idx = idx_wrap(blen, ROUND_UP(wr_idx + len, _PBUF_IDX_SIZE)); + /* Update wr_idx. */ + pb->data.wr_idx = wr_idx; + *(pb->cfg.wr_idx_loc) = wr_idx; + + return len; } -int pbuf_read(struct pbuf *pb, char *buf, uint16_t len) +int +pbuf_read(struct pbuf *pb, char *buf, uint16_t len) { - if (pb == NULL) { - /* Incorrect call. */ - return -EINVAL; - } - - /* Invalidate wr_idx only, local rd_idx is used to increase buffer security. */ - sys_cache_data_invd_range((void *)(pb->cfg->wr_idx_loc), sizeof(*(pb->cfg->wr_idx_loc))); - __sync_synchronize(); - - uint8_t *const data_loc = pb->cfg->data_loc; - const uint32_t blen = pb->cfg->len; - uint32_t wr_idx = *(pb->cfg->wr_idx_loc); - uint32_t rd_idx = pb->data.rd_idx; - - /* rd_idx must always be aligned. */ - __ASSERT_NO_MSG(IS_PTR_ALIGNED_BYTES(rd_idx, _PBUF_IDX_SIZE)); - /* wr_idx shall always be aligned, but its value is received from the - * writer. Can not assert. - */ - if (!IS_PTR_ALIGNED_BYTES(wr_idx, _PBUF_IDX_SIZE)) { - return -EINVAL; - } - - if (rd_idx == wr_idx) { - /* Buffer is empty. */ - return 0; - } - - /* Get packet len.*/ - sys_cache_data_invd_range(&data_loc[rd_idx], PBUF_PACKET_LEN_SZ); - uint16_t plen = sys_get_be16(&data_loc[rd_idx]); - - if (!buf) { - return (int)plen; - } - - if (plen > len) { - return -ENOMEM; - } - - uint32_t occupied_space = idx_occupied(blen, wr_idx, rd_idx); - - if (occupied_space < plen + PBUF_PACKET_LEN_SZ) { - /* This should never happen. 
*/ - return -EAGAIN; - } - - rd_idx = idx_wrap(blen, rd_idx + PBUF_PACKET_LEN_SZ); - - /* Packet will fit into provided buffer, truncate len if provided len - * is bigger than necessary. - */ - len = MIN(plen, len); - - /* Read until end of the buffer, if data are wrapped. */ - uint32_t tail = MIN(blen - rd_idx, len); - - sys_cache_data_invd_range(&data_loc[rd_idx], tail); - memcpy(buf, &data_loc[rd_idx], tail); - - if (len > tail) { - sys_cache_data_invd_range(&data_loc[0], len - tail); - memcpy(&buf[tail], &pb->cfg->data_loc[0], len - tail); - } - - /* Update rd_idx. */ - rd_idx = idx_wrap(blen, ROUND_UP(rd_idx + len, _PBUF_IDX_SIZE)); - - pb->data.rd_idx = rd_idx; - *(pb->cfg->rd_idx_loc) = rd_idx; - __sync_synchronize(); - sys_cache_data_flush_range((void *)pb->cfg->rd_idx_loc, sizeof(*(pb->cfg->rd_idx_loc))); - - return len; + if (pb == NULL) { + /* Incorrect call. */ + return -EINVAL; + } + + uint8_t *const data_loc = pb->cfg.data_loc; + const uint32_t blen = pb->cfg.len; + uint32_t wr_idx = *(pb->cfg.wr_idx_loc); + uint32_t rd_idx = pb->data.rd_idx; + + /* rd_idx must always be aligned. */ + assert(IS_PTR_ALIGNED_BYTES(rd_idx, _PBUF_IDX_SIZE)); + /* wr_idx shall always be aligned, but its value is received from the + * writer. Can not assert. + */ + if (!IS_PTR_ALIGNED_BYTES(wr_idx, _PBUF_IDX_SIZE)) { + return -EINVAL; + } + + if (rd_idx == wr_idx) { + /* Buffer is empty. */ + return 0; + } + + /* Get packet len.*/ + uint16_t plen = get_be16(&data_loc[rd_idx]); + + if (!buf) { + return (int)plen; + } + + if (plen > len) { + return -ENOMEM; + } + + uint32_t occupied_space = idx_occupied(blen, wr_idx, rd_idx); + + if (occupied_space < plen + PBUF_PACKET_LEN_SZ) { + /* This should never happen. */ + return -EAGAIN; + } + + rd_idx = idx_wrap(blen, rd_idx + PBUF_PACKET_LEN_SZ); + + /* Packet will fit into provided buffer, truncate len if provided len + * is bigger than necessary. 
+ */ + len = MIN(plen, len); + + /* Read until end of the buffer, if data are wrapped. */ + uint32_t tail = MIN(blen - rd_idx, len); + + memcpy(buf, &data_loc[rd_idx], tail); + + if (len > tail) { + memcpy(&buf[tail], &pb->cfg.data_loc[0], len - tail); + } + + /* Update rd_idx. */ + rd_idx = idx_wrap(blen, ROUND_UP(rd_idx + len, _PBUF_IDX_SIZE)); + + pb->data.rd_idx = rd_idx; + *(pb->cfg.rd_idx_loc) = rd_idx; + + return len; } diff --git a/hw/drivers/ipc/icbmsg/src/pbuf.h b/hw/drivers/ipc/icbmsg/src/pbuf.h index 0be5bd5185..5dd26108bf 100644 --- a/hw/drivers/ipc/icbmsg/src/pbuf.h +++ b/hw/drivers/ipc/icbmsg/src/pbuf.h @@ -7,17 +7,14 @@ #ifndef ZEPHYR_INCLUDE_IPC_PBUF_H_ #define ZEPHYR_INCLUDE_IPC_PBUF_H_ -#include -#include - #ifdef __cplusplus extern "C" { #endif /** - * @brief Packed buffer API + * @brief Packed buffer API * @defgroup pbuf Packed Buffer API - * @ingroup ipc + * @ingroup ipc * @{ */ @@ -40,20 +37,22 @@ extern "C" { * The structure contains configuration data. */ struct pbuf_cfg { - volatile uint32_t *rd_idx_loc; /* Address of the variable holding - * index value of the first valid byte - * in data[]. - */ - volatile uint32_t *wr_idx_loc; /* Address of the variable holding - * index value of the first free byte - * in data[]. - */ - uint32_t dcache_alignment; /* CPU data cache line size in bytes. - * Used for validation - TODO: To be - * replaced by flags. - */ - uint32_t len; /* Length of data[] in bytes. */ - uint8_t *data_loc; /* Location of the data[]. */ + /* Address of the variable holding index value of + * the first valid byte in data[]. + */ + volatile uint32_t *rd_idx_loc; + /* Address of the variable holding index value of + * the first free byte in data[]. + */ + volatile uint32_t *wr_idx_loc; + /* CPU data cache line size in bytes. Used for validation + * TODO: To be replaced by flags. + */ + uint32_t dcache_alignment; + /* Length of data[] in bytes. */ + uint32_t len; + /* Location of the data[]. 
*/ + uint8_t *data_loc; }; /** @@ -63,17 +62,16 @@ struct pbuf_cfg { * reader respectively. */ struct pbuf_data { - volatile uint32_t wr_idx; /* Index of the first holding first - * free byte in data[]. Used for - * writing. - */ - volatile uint32_t rd_idx; /* Index of the first holding first - * valid byte in data[]. Used for - * reading. - */ + /* Index of the first holding first free byte in data[]. + * Used for writing. + */ + volatile uint32_t wr_idx; + /* Index of the first holding first valid byte in data[]. + * Used for reading. + */ + volatile uint32_t rd_idx; }; - /** * @brief Scure packed buffer. * @@ -87,12 +85,10 @@ struct pbuf_data { * written in a way to protect the data from being corrupted. */ struct pbuf { - const struct pbuf_cfg *const cfg; /* Configuration of the - * buffer. - */ - struct pbuf_data data; /* Data used to read and write - * to the buffer - */ + /* Configuration of the buffer. */ + struct pbuf_cfg cfg; + /* Data used to read and write to the buffer */ + struct pbuf_data data; }; /** @@ -101,55 +97,31 @@ struct pbuf { * It is recommended to use this macro to initialize packed buffer * configuration. * - * @param mem_addr Memory address for pbuf. - * @param size Size of the memory. - * @param dcache_align Data cache alignment. + * @param mem_addr Memory address for pbuf. + * @param size Size of the memory. + * @param dcache_align Data cache alignment. 
*/ -#define PBUF_CFG_INIT(mem_addr, size, dcache_align) \ -{ \ - .rd_idx_loc = (uint32_t *)(mem_addr), \ - .wr_idx_loc = (uint32_t *)((uint8_t *)(mem_addr) + \ - MAX(dcache_align, _PBUF_IDX_SIZE)), \ - .data_loc = (uint8_t *)((uint8_t *)(mem_addr) + \ - MAX(dcache_align, _PBUF_IDX_SIZE) + _PBUF_IDX_SIZE), \ - .len = (uint32_t)((uint32_t)(size) - MAX(dcache_align, _PBUF_IDX_SIZE) - \ - _PBUF_IDX_SIZE), \ - .dcache_alignment = (dcache_align), \ -} +#define PBUF_CFG_INIT(mem_addr, size, dcache_align) \ + { \ + .rd_idx_loc = (uint32_t *)(mem_addr), \ + .wr_idx_loc = (uint32_t *)((uint8_t *)(mem_addr) + \ + MAX(dcache_align, _PBUF_IDX_SIZE)), \ + .data_loc = (uint8_t *)((uint8_t *)(mem_addr) + \ + MAX(dcache_align, _PBUF_IDX_SIZE) + _PBUF_IDX_SIZE), \ + .len = (uint32_t)((uint32_t)(size) - MAX(dcache_align, _PBUF_IDX_SIZE) - \ + _PBUF_IDX_SIZE), \ + .dcache_alignment = (dcache_align), \ + } /** * @brief Macro calculates memory overhead taken by the header in shared memory. * * It contains the read index, write index and padding. * - * @param dcache_align Data cache alignment. + * @param dcache_align Data cache alignment. */ #define PBUF_HEADER_OVERHEAD(dcache_align) \ - (MAX(dcache_align, _PBUF_IDX_SIZE) + _PBUF_IDX_SIZE) - -/** - * @brief Statically define and initialize pbuf. - * - * @param name Name of the pbuf. - * @param mem_addr Memory address for pbuf. - * @param size Size of the memory. - * @param dcache_align Data cache line size. 
- */ -#define PBUF_DEFINE(name, mem_addr, size, dcache_align) \ - BUILD_ASSERT(dcache_align >= 0, \ - "Cache line size must be non negative."); \ - BUILD_ASSERT((size) > 0 && IS_PTR_ALIGNED_BYTES(size, _PBUF_IDX_SIZE), \ - "Incorrect size."); \ - BUILD_ASSERT(IS_PTR_ALIGNED_BYTES(mem_addr, MAX(dcache_align, _PBUF_IDX_SIZE)), \ - "Misaligned memory."); \ - BUILD_ASSERT(size >= (MAX(dcache_align, _PBUF_IDX_SIZE) + _PBUF_IDX_SIZE + \ - _PBUF_MIN_DATA_LEN), "Insufficient size."); \ - \ - static const struct pbuf_cfg cfg_##name = \ - PBUF_CFG_INIT(mem_addr, size, dcache_align); \ - static struct pbuf name = { \ - .cfg = &cfg_##name, \ - } + (MAX(dcache_align, _PBUF_IDX_SIZE) + _PBUF_IDX_SIZE) /** * @brief Initialize the packet buffer. @@ -157,11 +129,9 @@ struct pbuf { * This function initializes the packet buffer based on provided configuration. * If the configuration is incorrect, the function will return error. * - * It is recommended to use PBUF_DEFINE macro for build time initialization. - * - * @param pb Pointer to the packed buffer containing - * configuration and data. Configuration has to be - * fixed before the initialization. + * @param pb Pointer to the packed buffer containing + * configuration and data. Configuration has to be + * fixed before the initialization. * @retval 0 on success. * @retval -EINVAL when the input parameter is incorrect. */ @@ -173,12 +143,12 @@ int pbuf_init(struct pbuf *pb); * This function call writes specified amount of data to the packet buffer if * the buffer will fit the data. * - * @param pb A buffer to which to write. - * @param buf Pointer to the data to be written to the buffer. - * @param len Number of bytes to be written to the buffer. Must be positive. - * @retval int Number of bytes written, negative error code on fail. - * -EINVAL, if any of input parameter is incorrect. - * -ENOMEM, if len is bigger than the buffer can fit. + * @param pb A buffer to which to write. 
+ * @param buf Pointer to the data to be written to the buffer. + * @param len Number of bytes to be written to the buffer. Must be positive. + * @retval int Number of bytes written, negative error code on fail. + * -EINVAL, if any of input parameter is incorrect. + * -ENOMEM, if len is bigger than the buffer can fit. */ int pbuf_write(struct pbuf *pb, const char *buf, uint16_t len); @@ -189,15 +159,15 @@ int pbuf_write(struct pbuf *pb, const char *buf, uint16_t len); * Single read allows to read the message send by the single write. * The provided %p buf must be big enough to store the whole message. * - * @param pb A buffer from which data will be read. - * @param buf Data pointer to which read data will be written. - * If NULL, len of stored message is returned. - * @param len Number of bytes to be read from the buffer. - * @retval int Bytes read, negative error code on fail. - * Bytes to be read, if buf == NULL. - * -EINVAL, if any of input parameter is incorrect. - * -ENOMEM, if message can not fit in provided buf. - * -EAGAIN, if not whole message is ready yet. + * @param pb A buffer from which data will be read. + * @param buf Data pointer to which read data will be written. + * If NULL, len of stored message is returned. + * @param len Number of bytes to be read from the buffer. + * @retval int Bytes read, negative error code on fail. + * Bytes to be read, if buf == NULL. + * -EINVAL, if any of input parameter is incorrect. + * -ENOMEM, if message can not fit in provided buf. + * -EAGAIN, if not whole message is ready yet. */ int pbuf_read(struct pbuf *pb, char *buf, uint16_t len); diff --git a/hw/drivers/ipc/icbmsg/src/utils.h b/hw/drivers/ipc/icbmsg/src/utils.h new file mode 100644 index 0000000000..a5d6a49541 --- /dev/null +++ b/hw/drivers/ipc/icbmsg/src/utils.h @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +#ifndef _HW_DRIVERS_IPC_ICBMSG_UTILS_H +#define _HW_DRIVERS_IPC_ICBMSG_UTILS_H + +#ifdef __cplusplus +extern "C" { +#endif + +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) + +/* Value of x rounded up to the next multiple of align. */ +#define ROUND_UP(x, align) \ + ((((unsigned long)(x) + ((unsigned long)(align) - 1)) / \ + (unsigned long)(align)) * (unsigned long)(align)) + +#define ROUND_DOWN(x, align) \ + (((unsigned long)(x) / (unsigned long)(align)) * (unsigned long)(align)) + +/* Check if a pointer is aligned for against a specific byte boundary */ +#define IS_PTR_ALIGNED_BYTES(ptr, bytes) ((((uintptr_t)ptr) % bytes) == 0) + +#ifndef MAX +#define MAX(a, b) (((a) > (b)) ? (a) : (b)) +#endif + +#ifndef MIN +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* _HW_DRIVERS_IPC_ICBMSG_UTILS_H */ diff --git a/hw/drivers/ipc/icbmsg/syscfg.yml b/hw/drivers/ipc/icbmsg/syscfg.yml new file mode 100644 index 0000000000..8962f8be36 --- /dev/null +++ b/hw/drivers/ipc/icbmsg/syscfg.yml @@ -0,0 +1,23 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +syscfg.defs: + IPC_ICBMSG_NUM_EP: + description: > + Allowed number of endpoints within an IPC instance. + value: 2 diff --git a/hw/drivers/ipc/include/ipc/ipc.h b/hw/drivers/ipc/include/ipc/ipc.h new file mode 100644 index 0000000000..93c5910034 --- /dev/null +++ b/hw/drivers/ipc/include/ipc/ipc.h @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +#ifndef _HW_DRIVERS_IPC_H +#define _HW_DRIVERS_IPC_H + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +void ipc_init(void); +int ipc_open(uint8_t ipc_id); +int ipc_signal(uint8_t ipc_id); +void ipc_process_signal(uint8_t ipc_id); +uint8_t ipc_ready(uint8_t ipc_id); + +#ifdef __cplusplus +} +#endif + +#endif /* _HW_DRIVERS_IPC_H */ diff --git a/hw/drivers/ipc/pkg.yml b/hw/drivers/ipc/pkg.yml new file mode 100644 index 0000000000..c79155ee65 --- /dev/null +++ b/hw/drivers/ipc/pkg.yml @@ -0,0 +1,32 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +pkg.name: hw/drivers/ipc +pkg.description: IPC driver +pkg.author: "Apache Mynewt " +pkg.homepage: "http://mynewt.apache.org/" +pkg.keywords: + - ipc + +pkg.deps: + - "@apache-mynewt-core/hw/mcu/nordic" + - "@apache-mynewt-core/kernel/os" + +pkg.init: + ipc_init: 10 diff --git a/hw/drivers/ipc/src/ipc.c b/hw/drivers/ipc/src/ipc.c new file mode 100644 index 0000000000..f54056ed3a --- /dev/null +++ b/hw/drivers/ipc/src/ipc.c @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +#include +#include +#include +#include +#include + +#define IPC_MAX_CHANS MYNEWT_VAL(IPC_CHANNELS) +#define IPC_SYNC_ID 0 + +/* IPC channels used for startup sync */ +#if MYNEWT_VAL(MCU_APP_CORE) +#define IPC_TX_SYNC_CHANNEL 0 +#define IPC_RX_SYNC_CHANNEL 1 +#else +#define IPC_TX_SYNC_CHANNEL 1 +#define IPC_RX_SYNC_CHANNEL 0 +#endif + +static void +ipc_cb(uint8_t channel) +{ + assert(channel == IPC_RX_SYNC_CHANNEL); + + os_trace_isr_enter(); + + ipc_process_signal(IPC_SYNC_ID); + + os_trace_isr_exit(); +} + +static void +ipc_common_init(void) +{ + hal_ipc_init(); + hal_ipc_register_callback(IPC_RX_SYNC_CHANNEL, ipc_cb); + ipc_open(IPC_SYNC_ID); + hal_ipc_enable_irq(IPC_RX_SYNC_CHANNEL, 1); + hal_ipc_start(); +} + +int +ipc_signal(uint8_t channel) +{ + return hal_ipc_signal(channel); +} + +void +ipc_init(void) +{ + ipc_common_init(); + + while (!ipc_ready(IPC_SYNC_ID)) { +#if MYNEWT_VAL(MCU_APP_CORE) + ipc_signal(IPC_TX_SYNC_CHANNEL); +#endif + } +} diff --git a/hw/drivers/ipc/syscfg.yml b/hw/drivers/ipc/syscfg.yml new file mode 100644 index 0000000000..101675cb62 --- /dev/null +++ b/hw/drivers/ipc/syscfg.yml @@ -0,0 +1,27 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +syscfg.defs: + IPC_CHANNELS: + description: > + Number of enabled IPC channels + value: 5 + range: 1..16 + +syscfg.vals.'BSP_NRF5340_NET || BSP_NRF54_RAD': + IPC_CHANNELS: 16 diff --git a/hw/hal/include/hal/hal_ipc.h b/hw/hal/include/hal/hal_ipc.h new file mode 100644 index 0000000000..88add13e16 --- /dev/null +++ b/hw/hal/include/hal/hal_ipc.h @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +/** + * @addtogroup HAL + * @{ + * @defgroup HALIPC HAL IPC + * @{ + */ + +#ifndef H_HAL_IPC_ +#define H_HAL_IPC_ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef void (*hal_ipc_cb)(uint8_t channel); + +void hal_ipc_init(void); +void hal_ipc_start(void); +void hal_ipc_register_callback(uint8_t channel, hal_ipc_cb cb); +void hal_ipc_enable_irq(uint8_t channel, bool enable); +int hal_ipc_signal(uint8_t channel); + +#ifdef __cplusplus +} +#endif + +#endif /* H_HAL_IPC_ */ + +/** + * @} HALIPC + * @} HAL + */ diff --git a/hw/mcu/nordic/nrf5340_net/nrf5340_net.ld b/hw/mcu/nordic/nrf5340_net/nrf5340_net.ld index 74a20ff494..8544f58425 100644 --- a/hw/mcu/nordic/nrf5340_net/nrf5340_net.ld +++ b/hw/mcu/nordic/nrf5340_net/nrf5340_net.ld @@ -138,6 +138,22 @@ INCLUDE "link_tables.ld.h" *(.ipc) } > IPC + /* Section for IPC RX */ + .ipc0_rx (NOLOAD): + { + . = ALIGN(4); + *(.ipc0_rx) + . = ALIGN(4); + } > sram_ipc0_rx + + /* Section for IPC TX */ + .ipc0_tx (NOLOAD): + { + . = ALIGN(4); + *(.ipc0_tx) + . 
= ALIGN(4); + } > sram_ipc0_tx + /* This section will be zeroed by RTT package init */ .rtt (NOLOAD): { diff --git a/hw/mcu/nordic/nrf5340_net/pkg.yml b/hw/mcu/nordic/nrf5340_net/pkg.yml index 83146c6566..55fe9de50a 100644 --- a/hw/mcu/nordic/nrf5340_net/pkg.yml +++ b/hw/mcu/nordic/nrf5340_net/pkg.yml @@ -28,8 +28,13 @@ pkg.keywords: pkg.deps: - "@apache-mynewt-core/hw/mcu/nordic" - "@apache-mynewt-core/hw/mcu/nordic/nrf_common" + +pkg.deps.'BLE_TRANSPORT_HS == "nrf5340"': - "@apache-mynewt-core/hw/drivers/ipc_nrf5340" +pkg.deps.'BLE_TRANSPORT_HS == "ipc"': + - "@apache-mynewt-core/hw/drivers/ipc" + pkg.cflags.NFC_PINS_AS_GPIO: - '-DCONFIG_NFCT_PINS_AS_GPIOS=1' diff --git a/hw/mcu/nordic/nrf5340_net/src/hal_ipc.c b/hw/mcu/nordic/nrf5340_net/src/hal_ipc.c new file mode 100644 index 0000000000..39c8191c31 --- /dev/null +++ b/hw/mcu/nordic/nrf5340_net/src/hal_ipc.c @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#ifndef BIT +#define BIT(n) (1UL << (n)) +#endif + +#define IPC_MAX_CHANS 4 + +static hal_ipc_cb cbs[IPC_MAX_CHANS]; + +int +hal_ipc_signal(uint8_t channel) +{ + assert(channel < IPC_MAX_CHANS); + + nrfx_ipc_signal(channel); + + return 0; +} + +void +hal_ipc_register_callback(uint8_t channel, hal_ipc_cb cb) +{ + assert(channel < IPC_MAX_CHANS); + + cbs[channel] = cb; +} + +void +hal_ipc_enable_irq(uint8_t channel, bool enable) +{ + assert(channel < IPC_MAX_CHANS); + + if (enable) { + NRF_IPC->RECEIVE_CNF[channel] = BIT(channel); + NRF_IPC->INTENSET = BIT(channel); + } else { + NRF_IPC->INTENCLR = BIT(channel); + NRF_IPC->RECEIVE_CNF[channel] = 0; + } +} + +static void +ipc_isr(void) +{ + uint32_t irq_pend; + uint8_t channel; + + os_trace_isr_enter(); + + /* Handle only interrupts that were enabled */ + irq_pend = NRF_IPC->INTPEND & NRF_IPC->INTEN; + + for (channel = 0; channel < IPC_MAX_CHANS; ++channel) { + if (irq_pend & BIT(channel)) { + NRF_IPC->EVENTS_RECEIVE[channel] = 0; + + if (cbs[channel] != NULL) { + cbs[channel](channel); + } + } + } + + os_trace_isr_exit(); +} + +void +hal_ipc_init(void) +{ + uint8_t i; + +#if MYNEWT_VAL(MCU_APP_CORE) + /* Make sure network core is off when we set up IPC */ + nrf_reset_network_force_off(NRF_RESET, true); + + if (MYNEWT_VAL(MCU_APP_SECURE) && !MYNEWT_VAL(IPC_NRF5340_PRE_TRUSTZONE_NETCORE_BOOT)) { + /* + * When bootloader is secure and application is not all peripherals are + * in unsecure mode. This is done by bootloader. + * If application runs in secure mode IPC manually chooses to use unsecure version + * so net core can always use same peripheral. 
+ */ + NRF_SPU->PERIPHID[42].PERM &= ~SPU_PERIPHID_PERM_SECATTR_Msk; + } +#endif + + /* Enable IPC channels */ + for (i = 0; i < IPC_MAX_CHANS; ++i) { + NRF_IPC->SEND_CNF[i] = BIT(i); + NRF_IPC->RECEIVE_CNF[i] = 0; + } + + NRF_IPC->INTENCLR = 0xFFFF; + NVIC_ClearPendingIRQ(IPC_IRQn); + NVIC_SetVector(IPC_IRQn, (uint32_t)ipc_isr); + NVIC_EnableIRQ(IPC_IRQn); +} + +void +hal_ipc_start(void) +{ +#if MYNEWT_VAL(MCU_APP_CORE) + if (MYNEWT_VAL(MCU_APP_SECURE)) { + /* this allows netcore to access appcore RAM */ + NRF_SPU_S->EXTDOMAIN[0].PERM = SPU_EXTDOMAIN_PERM_SECATTR_Secure << SPU_EXTDOMAIN_PERM_SECATTR_Pos; + } + + /* Start Network Core */ + nrf_reset_network_force_off(NRF_RESET, false); + + /* + * Wait for NET core to start and init it's side of IPC. + * It may take several seconds if there is net core + * embedded image in the application flash. + */ +#endif +} diff --git a/hw/mcu/nordic/nrf5340_net/src/hal_vflash.c b/hw/mcu/nordic/nrf5340_net/src/hal_vflash.c index 8024ce4517..6962ffa8ba 100644 --- a/hw/mcu/nordic/nrf5340_net/src/hal_vflash.c +++ b/hw/mcu/nordic/nrf5340_net/src/hal_vflash.c @@ -27,7 +27,9 @@ #include #include #include +#if !MYNEWT_VAL(IPC_ICBMSG) #include +#endif #define NRF5340_NET_VFLASH_SECTOR_SZ 2048 @@ -218,7 +220,12 @@ nrf5340_net_vflash_init(const struct hal_flash *dev) const void *img_addr; uint32_t image_size; +#if MYNEWT_VAL(IPC_ICBMSG) + img_addr = 0; + image_size = 0; +#else img_addr = ipc_nrf5340_net_image_get(&image_size); +#endif /* * Application side IPC will set ipc_share data