diff --git a/platform/nephos/nephos-modules.mk b/platform/nephos/nephos-modules.mk
new file mode 100644
index 000000000000..565e975e0a08
--- /dev/null
+++ b/platform/nephos/nephos-modules.mk
@@ -0,0 +1,10 @@
+# Nephos Platform modules
+
+VERSION = 1.0.0
+
+NEPHOS_MODULE = nephos-modules_$(VERSION)_amd64.deb
+$(NEPHOS_MODULE)_SRC_PATH = $(PLATFORM_PATH)/nephos-modules
+$(NEPHOS_MODULE)_DEPENDS += $(LINUX_HEADERS) $(LINUX_HEADERS_COMMON)
+SONIC_DPKG_DEBS += $(NEPHOS_MODULE)
+
+SONIC_STRETCH_DEBS += $(NEPHOS_MODULE)
diff --git a/platform/nephos/nephos-modules/README.md b/platform/nephos/nephos-modules/README.md
new file mode 100644
index 000000000000..6bb4cf6bf658
--- /dev/null
+++ b/platform/nephos/nephos-modules/README.md
@@ -0,0 +1,2 @@
+# Nephos-modules
+Device drivers supporting the Nephos platform in the SONiC project
diff --git a/platform/nephos/nephos-modules/debian/changelog b/platform/nephos/nephos-modules/debian/changelog
new file mode 100644
index 000000000000..3de2bd045efd
--- /dev/null
+++ b/platform/nephos/nephos-modules/debian/changelog
@@ -0,0 +1,5 @@
+nephos-modules (1.0.0) unstable; urgency=low
+
+  * Initial release
+
+ -- Support  Fri, 15 Mar 2019 15:54:00 +0800
diff --git a/platform/nephos/nephos-modules/debian/compat b/platform/nephos/nephos-modules/debian/compat
new file mode 100644
index 000000000000..ec635144f600
--- /dev/null
+++ b/platform/nephos/nephos-modules/debian/compat
@@ -0,0 +1 @@
+9
diff --git a/platform/nephos/nephos-modules/debian/control b/platform/nephos/nephos-modules/debian/control
new file mode 100644
index 000000000000..31c29e2e31e0
--- /dev/null
+++ b/platform/nephos/nephos-modules/debian/control
@@ -0,0 +1,11 @@
+Source: nephos-modules
+Section: main
+Priority: extra
+Maintainer: support
+Build-Depends: debhelper (>= 8.0.0), bzip2
+Standards-Version: 3.9.3
+
+Package: nephos-modules
+Architecture: amd64
+Depends: linux-image-4.9.0-9-amd64
+Description: Kernel modules for the Nephos ASIC
diff --git a/platform/nephos/nephos-modules/debian/rules b/platform/nephos/nephos-modules/debian/rules
new file mode 100755
index 000000000000..f678ff308864
--- /dev/null
+++ b/platform/nephos/nephos-modules/debian/rules
@@ -0,0 +1,33 @@
+#!/usr/bin/make -f
+
+export INSTALL_MOD_DIR:=extra
+
+PACKAGE_NAME := nephos-modules
+KVERSION ?= $(shell uname -r)
+KERNEL_SRC := /lib/modules/$(KVERSION)
+SERVICE_DIR := service
+INITD_DIR := init.d
+MODULE_SRC := $(shell pwd)/modules
+CURRENT_DIR := $(shell pwd)
+
+%:
+	dh $@
+
+override_dh_auto_build:
+	make -C $(MODULE_SRC)
+
+override_dh_auto_install:
+	dh_installdirs -p$(PACKAGE_NAME) $(KERNEL_SRC)/extra
+	cp $(MODULE_SRC)/build/module/*.ko debian/$(PACKAGE_NAME)/$(KERNEL_SRC)/extra
+	dh_installdirs -p$(PACKAGE_NAME) /lib/systemd/system
+	cp $(MODULE_SRC)/service/*.service debian/$(PACKAGE_NAME)/lib/systemd/system/
+	dh_installdirs -p$(PACKAGE_NAME) /etc/init.d
+	cp $(MODULE_SRC)/init.d/* debian/$(PACKAGE_NAME)/etc/init.d/
+
+override_dh_usrlocal:
+
+override_dh_pysupport:
+
+override_dh_clean:
+	dh_clean
+	test ! -d $(MODULE_SRC)/build || rm -rf $(MODULE_SRC)/build
diff --git a/platform/nephos/nephos-modules/modules/Makefile b/platform/nephos/nephos-modules/modules/Makefile
new file mode 100755
index 000000000000..8351996b04ea
--- /dev/null
+++ b/platform/nephos/nephos-modules/modules/Makefile
@@ -0,0 +1,50 @@
+################################################################################
+# Copyright (C) 2019 Nephos, Inc.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of version 2 of the GNU General Public
+# License as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# version 2 along with this program.
+################################################################################
+NPS_MODULES_DIR := $(dir $(realpath $(lastword $(MAKEFILE_LIST))))
+SRC_PATH := $(NPS_MODULES_DIR)/src
+INC_PATH := $(SRC_PATH)/inc
+################################################################################
+include $(NPS_MODULES_DIR)/config.mk
+################################################################################
+MODULE_OUTPUT_DIR := $(BUILD_OUTPUT_DIR)/module
+################################################################################
+all: compile install
+################################################################################
+EXTRA_CFLAGS += -I$(INC_PATH)
+EXTRA_CFLAGS += -DNPS_EN_NETIF
+EXTRA_CFLAGS += -DNPS_EN_TAURUS
+EXTRA_CFLAGS += -DNPS_LINUX_USER_MODE
+EXTRA_CFLAGS += -DNPS_EN_LITTLE_ENDIAN
+ifeq ($(shell uname -m),x86_64)
+EXTRA_CFLAGS += -DNPS_EN_HOST_64_BIT_LITTLE_ENDIAN
+else
+EXTRA_CFLAGS += -DNPS_EN_HOST_32_BIT_LITTLE_ENDIAN
+endif
+################################################################################
+include $(SRC_PATH)/make.mk
+################################################################################
+compile::
+
+install::
+	$(TEST_PATH) $(MODULE_OUTPUT_DIR) || $(MKDIR) $(MODULE_OUTPUT_DIR)
+	$(MV) $(BUILD_OUTPUT_DIR)/$(DEV_MODULE_NAME).ko $(MODULE_OUTPUT_DIR)/$(DEV_MODULE_NAME).ko
+	$(MV) $(BUILD_OUTPUT_DIR)/$(NETIF_MODULE_NAME).ko $(MODULE_OUTPUT_DIR)/$(NETIF_MODULE_NAME).ko
+
+clean::
+	$(RM) $(BUILD_OUTPUT_DIR)
+
+.PHONY: all compile install clean
+.NOTPARALLEL: all compile install clean
diff --git a/platform/nephos/nephos-modules/modules/README b/platform/nephos/nephos-modules/modules/README
new file mode 100644
index 000000000000..b2e6fb147936
--- /dev/null
+++ b/platform/nephos/nephos-modules/modules/README
@@ -0,0 +1,32 @@
+################################################################################
+# Copyright (C) 2019 Nephos, Inc.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of version 2 of the GNU General Public
+# License as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# version 2 along with this program.
+################################################################################
+Steps 1~4 show how to build and load the NPS kernel modules.
+
+1. Modify nps-modules/config.mk to set BUILD_OUTPUT_DIR to the desired
+   output directory. The default output path is nps-modules/build.
+
+2. Compile:
+   cd nps-modules/ && make
+
+3. The output kernel modules can be found in $(BUILD_OUTPUT_DIR)/module/:
+   - nps_dev.ko
+   - nps_netif.ko
+
+4. Load the modules:
+   (1) insmod nps_dev.ko
+   (2) insmod nps_netif.ko
+
+   Note that the module insertion order must not be changed.
diff --git a/platform/nephos/nephos-modules/modules/config.mk b/platform/nephos/nephos-modules/modules/config.mk
new file mode 100755
index 000000000000..b7a106ea0a4b
--- /dev/null
+++ b/platform/nephos/nephos-modules/modules/config.mk
@@ -0,0 +1,30 @@
+################################################################################
+# Copyright (C) 2019 Nephos, Inc.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of version 2 of the GNU General Public
+# License as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# version 2 along with this program.
+################################################################################
+BUILD_OUTPUT_DIR := $(NPS_MODULES_DIR)/build
+################################################################################
+#OS_PATH := /lib/modules/$(shell uname -r)/build
+OS_PATH := /lib/modules/$(KVERSION)/build
+
+################################################################################
+MAKE := make
+RM := rm -rf
+MKDIR := mkdir -p
+CP := cp
+MV := mv
+TEST_PATH := test -d
+################################################################################
+export BUILD_OUTPUT_DIR
+export OS_PATH
diff --git a/platform/nephos/nephos-modules/modules/init.d/nps-modules-4.9.0-9-amd64 b/platform/nephos/nephos-modules/modules/init.d/nps-modules-4.9.0-9-amd64
new file mode 100755
index 000000000000..5907cc3a1df9
--- /dev/null
+++ b/platform/nephos/nephos-modules/modules/init.d/nps-modules-4.9.0-9-amd64
@@ -0,0 +1,52 @@
+#!/bin/bash
+# This script loads/unloads the nps kernel modules
+
+### BEGIN INIT INFO
+# Provides:          load-nps-modules
+# Required-Start:
+# Required-Stop:
+# Should-Start:
+# Should-Stop:
+# Default-Start:     S
+# Default-Stop:      0 6
+# Short-Description: Load nps kernel modules
+### END INIT INFO
+
+case "$1" in
+start)
+    echo -n "Load nps kernel modules... "
+
+    RMEM_SIZE=`cat /proc/sys/net/core/rmem_max`
+    if [ $RMEM_SIZE -lt 8388608 ]; then
+        echo "8388608" > /proc/sys/net/core/rmem_max
+    fi
+    WMEM_SIZE=`cat /proc/sys/net/core/wmem_max`
+    if [ $WMEM_SIZE -lt 25165824 ]; then
+        echo "25165824" > /proc/sys/net/core/wmem_max
+    fi
+
+    modprobe nps_dev
+    modprobe nps_netif
+
+    echo "done."
+    ;;
+
+stop)
+    echo -n "Unload nps kernel modules... "
+
+    rmmod nps_netif
+    rmmod nps_dev
+    echo "done."
+    ;;
+
+force-reload|restart)
+    echo "Not supported"
+    ;;
+
+*)
+    echo "Usage: /etc/init.d/nps-modules-4.9.0-9-amd64 {start|stop}"
+    exit 1
+    ;;
+esac
+
+exit 0
diff --git a/platform/nephos/nephos-modules/modules/service/nps-modules-4.9.0-9-amd64.service b/platform/nephos/nephos-modules/modules/service/nps-modules-4.9.0-9-amd64.service
new file mode 100644
index 000000000000..254dd31bba14
--- /dev/null
+++ b/platform/nephos/nephos-modules/modules/service/nps-modules-4.9.0-9-amd64.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Nephos kernel modules init
+After=local-fs.target
+Before=syncd.service
+
+[Service]
+Type=oneshot
+ExecStart=-/etc/init.d/nps-modules-4.9.0-9-amd64 start
+ExecStop=-/etc/init.d/nps-modules-4.9.0-9-amd64 stop
+RemainAfterExit=yes
+
+[Install]
+WantedBy=multi-user.target
diff --git a/platform/nephos/nephos-modules/modules/src/hal_tau_pkt_knl.c b/platform/nephos/nephos-modules/modules/src/hal_tau_pkt_knl.c
new file mode 100755
index 000000000000..b386da63e247
--- /dev/null
+++ b/platform/nephos/nephos-modules/modules/src/hal_tau_pkt_knl.c
@@ -0,0 +1,6147 @@
+/* Copyright (C) 2019 Nephos, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program.
+ */
+
+/* FILE NAME:  hal_tau_pkt_knl.c
+ * PURPOSE:
+ *      To provide the Linux kernel module for PDMA TX/RX control.
+ *
+ * NOTES:
+ *
+ */
+
+/*****************************************************************************
+ * INCLUDE FILE DECLARATIONS
+ *****************************************************************************
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/skbuff.h>
+
+/* netif */
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+
+/* nps_sdk */
+#include <hal_tau_pkt_knl.h>
+
+/*****************************************************************************
+ * CHIP DEPENDENT VARIABLES
+ *****************************************************************************
+ */
+/* Interrupt */
+#define HAL_TAU_PKT_ERR_REG(__unit__)                   (_hal_tau_pkt_intr_vec[0].intr_reg)
+#define HAL_TAU_PKT_TCH_REG(__unit__, __channel__)      (_hal_tau_pkt_intr_vec[1 + (__channel__)].intr_reg)
+#define HAL_TAU_PKT_RCH_REG(__unit__, __channel__)      (_hal_tau_pkt_intr_vec[5 + (__channel__)].intr_reg)
+
+#define HAL_TAU_PKT_ERR_EVENT(__unit__)                 (&_hal_tau_pkt_intr_vec[0].intr_event)
+#define HAL_TAU_PKT_TCH_EVENT(__unit__, __channel__)    (&_hal_tau_pkt_intr_vec[1 + (__channel__)].intr_event)
+#define HAL_TAU_PKT_RCH_EVENT(__unit__, __channel__)    (&_hal_tau_pkt_intr_vec[5 + (__channel__)].intr_event)
+
+#define HAL_TAU_PKT_ERR_CNT(__unit__)                   (_hal_tau_pkt_intr_vec[0].intr_cnt)
+#define HAL_TAU_PKT_TCH_CNT(__unit__, __channel__)      (_hal_tau_pkt_intr_vec[1 + (__channel__)].intr_cnt)
+#define HAL_TAU_PKT_RCH_CNT(__unit__, __channel__)      (_hal_tau_pkt_intr_vec[5 + (__channel__)].intr_cnt)
+
+
+/* This flag value will be specified when the user inserts the kernel module. */
+#define HAL_TAU_PKT_DBG_ERR         (0x1 << 0)
+#define HAL_TAU_PKT_DBG_TX          (0x1 << 1)
+#define HAL_TAU_PKT_DBG_RX          (0x1 << 2)
+#define HAL_TAU_PKT_DBG_INTF        (0x1 << 3)
+#define HAL_TAU_PKT_DBG_PROFILE     (0x1 << 4)
+#define HAL_TAU_PKT_DBG_COMMON      (0x1 << 5)
+
+/* Will be set when inserting the kernel module */
+static UI32_T dbg_flag = 0;
+
+#define HAL_TAU_PKT_DBG(__flag__, ...)      do  \
+{                                               \
+    if (0 != ((__flag__) & (dbg_flag)))         \
+    {                                           \
+        osal_printf(__VA_ARGS__);               \
+    }                                           \
+} while (0)
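For reference, the gate above is free when the flag is not set; a minimal sketch of how it is exercised (the flag combination is an arbitrary illustration, and `unit`/`channel` are assumed to be in scope):

    /* Hypothetical setting: log only error- and RX-path messages. */
    dbg_flag = HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_RX;

    /* Prints only because HAL_TAU_PKT_DBG_RX is set in dbg_flag above. */
    HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_RX, "u=%u, rxch=%u\n", unit, channel);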
+typedef struct
+{
+    UI32_T                  intr_reg;
+    NPS_SEMAPHORE_ID_T      intr_event;
+    UI32_T                  intr_cnt;
+
+} HAL_TAU_PKT_INTR_VEC_T;
+
+typedef struct HAL_TAU_PKT_PROFILE_NODE_S
+{
+    HAL_TAU_PKT_NETIF_PROFILE_T         *ptr_profile;
+    struct HAL_TAU_PKT_PROFILE_NODE_S   *ptr_next_node;
+
+} HAL_TAU_PKT_PROFILE_NODE_T;
+
+typedef struct
+{
+    HAL_TAU_PKT_NETIF_INTF_T    meta;
+    struct net_device           *ptr_net_dev;
+    HAL_TAU_PKT_PROFILE_NODE_T  *ptr_profile_list;  /* the profiles bound to this interface */
+
+} HAL_TAU_PKT_NETIF_PORT_DB_T;
+
+
+static HAL_TAU_PKT_INTR_VEC_T _hal_tau_pkt_intr_vec[] =
+{
+    { /* 0: PDMA_ERR */ 1 << 0,  0x0, 0 },
+    { /* 1: TX_CH0   */ 1 << 28, 0x0, 0 },
+    { /* 2: TX_CH1   */ 1 << 29, 0x0, 0 },
+    { /* 3: TX_CH2   */ 1 << 30, 0x0, 0 },
+    { /* 4: TX_CH3   */ 1 << 31, 0x0, 0 },
+    { /* 5: RX_CH0   */ 1 << 12, 0x0, 0 },
+    { /* 6: RX_CH1   */ 1 << 13, 0x0, 0 },
+    { /* 7: RX_CH2   */ 1 << 14, 0x0, 0 },
+    { /* 8: RX_CH3   */ 1 << 15, 0x0, 0 },
+};
+
+/*****************************************************************************
+ * NAMING CONSTANT DECLARATIONS
+ *****************************************************************************
+ */
+/* Sleep Time Definitions */
+#define HAL_TAU_PKT_TX_DEQUE_SLEEP()            osal_sleepThread(1000)  /* us */
+#define HAL_TAU_PKT_RX_DEQUE_SLEEP()            osal_sleepThread(1000)  /* us */
+#define HAL_TAU_PKT_TX_ENQUE_RETRY_SLEEP()      osal_sleepThread(1000)  /* us */
+#define HAL_TAU_PKT_RX_ENQUE_RETRY_SLEEP()      osal_sleepThread(1000)  /* us */
+#define HAL_TAU_PKT_ALLOC_MEM_RETRY_SLEEP()     osal_sleepThread(1000)  /* us */
+
+/* Network Device Definitions */
+#define HAL_TAU_PKT_TX_TIMEOUT                  (6*HZ)
+#define HAL_TAU_PKT_MAX_ETH_FRAME_SIZE          (HAL_TAU_PKT_RX_MAX_LEN)
+#define HAL_TAU_PKT_MAX_PORT_NUM                (HAL_TAU_PORT_NUM + 1)  /* CPU port */
+
+#define HAL_TAU_PKT_NET_PROFILE_NUM_MAX         (256)
+
+static HAL_TAU_PKT_NETIF_PROFILE_T  *_ptr_hal_tau_pkt_profile_entry[HAL_TAU_PKT_NET_PROFILE_NUM_MAX] = {0};
+static HAL_TAU_PKT_NETIF_PORT_DB_T  _hal_tau_pkt_port_db[HAL_TAU_PKT_MAX_PORT_NUM];
+
+/*****************************************************************************
+ * MACRO VALUE DECLARATIONS
+ *****************************************************************************
+ */
+
+/*****************************************************************************
+ * MACRO FUNCTION DECLARATIONS
+ *****************************************************************************
+ */
+/*---------------------------------------------------------------------------*/
+#define HAL_TAU_PKT_GET_DRV_CB_PTR(unit)                (&_hal_tau_pkt_drv_cb[unit])
+/*---------------------------------------------------------------------------*/
+#define HAL_TAU_PKT_GET_TX_CB_PTR(unit)                 (&_hal_tau_pkt_tx_cb[unit])
+#define HAL_TAU_PKT_GET_TX_PDMA_PTR(unit, channel)      (&_hal_tau_pkt_tx_cb[unit].pdma[channel])
+#define HAL_TAU_PKT_GET_TX_GPD_PTR(unit, channel, gpd)  (&_hal_tau_pkt_tx_cb[unit].pdma[channel].ptr_gpd_align_start_addr[gpd])
+/*---------------------------------------------------------------------------*/
+#define HAL_TAU_PKT_GET_RX_CB_PTR(unit)                 (&_hal_tau_pkt_rx_cb[unit])
+#define HAL_TAU_PKT_GET_RX_PDMA_PTR(unit, channel)      (&_hal_tau_pkt_rx_cb[unit].pdma[channel])
+#define HAL_TAU_PKT_GET_RX_GPD_PTR(unit, channel, gpd)  (&_hal_tau_pkt_rx_cb[unit].pdma[channel].ptr_gpd_align_start_addr[gpd])
+/*---------------------------------------------------------------------------*/
+#define HAL_TAU_PKT_GET_PORT_DB(port)                   (&_hal_tau_pkt_port_db[port])
+#define HAL_TAU_PKT_GET_PORT_PROFILE_LIST(port)         (_hal_tau_pkt_port_db[port].ptr_profile_list)
+#define HAL_TAU_PKT_GET_PORT_NETDEV(port)               _hal_tau_pkt_port_db[port].ptr_net_dev
+
+/*****************************************************************************
+ * DATA TYPE DECLARATIONS
+ *****************************************************************************
+ */
+/* ----------------------------------------------------------------------------------- General structure */
+typedef struct
+{
+    UI32_T                  unit;
+    UI32_T                  channel;
+
+} HAL_TAU_PKT_ISR_COOKIE_T;
+
+typedef struct
+{
+    NPS_HUGE_T              que_id;
+    NPS_SEMAPHORE_ID_T      sema;
+    UI32_T                  len;     /* Software CPU queue maximum length.        */
+    UI32_T                  weight;  /* The weight for thread de-queue algorithm. */
+
+} HAL_TAU_PKT_SW_QUEUE_T;
+
+typedef struct
+{
+    /* handleErrorTask */
+    NPS_THREAD_ID_T         err_task_id;
+
+    /* INTR dispatcher */
+    NPS_ISRLOCK_ID_T        intr_lock;
+    UI32_T                  intr_bitmap;
+
+#define HAL_TAU_PKT_INIT_DRV        (1 << 0)
+#define HAL_TAU_PKT_INIT_TASK       (1 << 1)
+#define HAL_TAU_PKT_INIT_INTR       (1 << 2)
+#define HAL_TAU_PKT_INIT_RX_START   (1 << 3)
+    /* a bitmap to record the init status */
+    UI32_T                  init_flag;
+
+} HAL_TAU_PKT_DRV_CB_T;
+
+/* ----------------------------------------------------------------------------------- TX structure */
+typedef struct
+{
+    /* NPS_SEMAPHORE_ID_T   sema; */
+
+    /* Since the Tx GPD ring may be accessed by multiple processes,
+     * including ndo_start_xmit (SW IRQ), it must be protected with an
+     * ISRLOCK instead of the original semaphore.
+     */
+    NPS_ISRLOCK_ID_T        ring_lock;
+
+    UI32_T                  used_idx;      /* SW send index = LAMP simulate the Tx HW index */
+    UI32_T                  free_idx;      /* SW free index */
+    UI32_T                  used_gpd_num;
+    UI32_T                  free_gpd_num;
+    UI32_T                  gpd_num;
+
+    HAL_TAU_PKT_TX_GPD_T    *ptr_gpd_start_addr;
+    HAL_TAU_PKT_TX_GPD_T    *ptr_gpd_align_start_addr;
+    BOOL_T                  err_flag;
+
+    /* ASYNC */
+    HAL_TAU_PKT_TX_SW_GPD_T **pptr_sw_gpd_ring;
+    HAL_TAU_PKT_TX_SW_GPD_T **pptr_sw_gpd_bulk;   /* temporarily stores packets to be enqueued */
+
+    /* SYNC_INTR */
+    NPS_SEMAPHORE_ID_T      sync_intr_sema;
+
+} HAL_TAU_PKT_TX_PDMA_T;
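For orientation, the used/free fields above implement a classic circular descriptor ring; a minimal sketch of the claim side, assuming it runs under `ring_lock` (the function name is illustrative, and the real logic lives in the TX send/free paths later in this file):

    /* Claim one GPD for transmit; returns FALSE when the ring is full. */
    static BOOL_T
    _tx_ring_claim_sketch(HAL_TAU_PKT_TX_PDMA_T *ptr_tx_pdma)
    {
        if (0 == ptr_tx_pdma->free_gpd_num)
        {
            return (FALSE);                  /* ring full; caller must retry */
        }
        ptr_tx_pdma->used_idx = (ptr_tx_pdma->used_idx + 1) % ptr_tx_pdma->gpd_num;
        ptr_tx_pdma->used_gpd_num++;
        ptr_tx_pdma->free_gpd_num--;
        return (TRUE);
    }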
+typedef struct
+{
+    HAL_TAU_PKT_TX_WAIT_T       wait_mode;
+    HAL_TAU_PKT_TX_PDMA_T       pdma[HAL_TAU_PKT_TX_CHANNEL_LAST];
+    HAL_TAU_PKT_TX_CNT_T        cnt;
+
+    /* handleTxDoneTask */
+    NPS_THREAD_ID_T             isr_task_id[HAL_TAU_PKT_TX_CHANNEL_LAST];
+    HAL_TAU_PKT_ISR_COOKIE_T    isr_task_cookie[HAL_TAU_PKT_TX_CHANNEL_LAST];
+
+    /* txTask */
+    HAL_TAU_PKT_SW_QUEUE_T      sw_queue;
+    NPS_SEMAPHORE_ID_T          sync_sema;
+    NPS_THREAD_ID_T             task_id;
+    BOOL_T                      running;   /* TRUE when Init txTask
+                                            * FALSE when Destroy txTask
+                                            */
+
+} HAL_TAU_PKT_TX_CB_T;
+
+/* ----------------------------------------------------------------------------------- RX structure */
+typedef struct
+{
+    NPS_SEMAPHORE_ID_T      sema;
+    UI32_T                  cur_idx;   /* SW free index */
+    UI32_T                  gpd_num;
+
+    HAL_TAU_PKT_RX_GPD_T    *ptr_gpd_start_addr;
+    HAL_TAU_PKT_RX_GPD_T    *ptr_gpd_align_start_addr;
+    BOOL_T                  err_flag;
+    struct sk_buff          **pptr_skb_ring;
+} HAL_TAU_PKT_RX_PDMA_T;
+
+typedef struct
+{
+    /* Rx system configuration */
+    UI32_T                  buf_len;
+
+    HAL_TAU_PKT_RX_SCHED_T  sched_mode;
+    HAL_TAU_PKT_RX_PDMA_T   pdma[HAL_TAU_PKT_RX_CHANNEL_LAST];
+    HAL_TAU_PKT_RX_CNT_T    cnt;
+
+    
/* handleRxDoneTask */ + NPS_THREAD_ID_T isr_task_id[HAL_TAU_PKT_RX_CHANNEL_LAST]; + HAL_TAU_PKT_ISR_COOKIE_T isr_task_cookie[HAL_TAU_PKT_RX_CHANNEL_LAST]; + + /* rxTask */ + HAL_TAU_PKT_SW_QUEUE_T sw_queue[HAL_TAU_PKT_RX_QUEUE_NUM]; + UI32_T deque_idx; + NPS_SEMAPHORE_ID_T sync_sema; + NPS_THREAD_ID_T task_id; + NPS_SEMAPHORE_ID_T deinit_sema; /* To sync-up the Rx-stop and thread flush queues */ + BOOL_T running; /* TRUE when rxStart + * FALSE when rxStop + */ + +} HAL_TAU_PKT_RX_CB_T; + +/* ----------------------------------------------------------------------------------- Network Device */ +struct net_device_priv +{ + struct net_device *ptr_net_dev; + struct net_device_stats stats; + UI32_T unit; + UI32_T id; + UI32_T port; + UI16_T vlan; + UI32_T speed; +}; + +typedef enum +{ + HAL_TAU_PKT_DEST_NETDEV = 0, + HAL_TAU_PKT_DEST_SDK, + HAL_TAU_PKT_DEST_DROP, + HAL_TAU_PKT_DEST_LAST +} HAL_TAU_PKT_DEST_T; + +/***************************************************************************** + * GLOBAL VARIABLE DECLARATIONS + ***************************************************************************** + */ + +/***************************************************************************** + * STATIC VARIABLE DECLARATIONS + ***************************************************************************** + */ +/*---------------------------------------------------------------------------*/ +static HAL_TAU_PKT_DRV_CB_T _hal_tau_pkt_drv_cb[NPS_CFG_MAXIMUM_CHIPS_PER_SYSTEM]; +static HAL_TAU_PKT_TX_CB_T _hal_tau_pkt_tx_cb[NPS_CFG_MAXIMUM_CHIPS_PER_SYSTEM]; +static HAL_TAU_PKT_RX_CB_T _hal_tau_pkt_rx_cb[NPS_CFG_MAXIMUM_CHIPS_PER_SYSTEM]; +/*---------------------------------------------------------------------------*/ + +/***************************************************************************** + * LOCAL SUBPROGRAM DECLARATIONS + ***************************************************************************** + */ +/* ----------------------------------------------------------------------------------- Interrupt */ +static NPS_ERROR_NO_T +_hal_tau_pkt_enableIntr( + const UI32_T unit, + const UI32_T intr_bitmap) +{ + HAL_TAU_PKT_DRV_CB_T *ptr_cb = HAL_TAU_PKT_GET_DRV_CB_PTR(unit); + NPS_IRQ_FLAGS_T irq_flag = 0; + UI32_T intr_en = 0; + + osal_takeIsrLock(&ptr_cb->intr_lock, &irq_flag); + osal_mdc_readPciReg(unit, HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_CP_COMMON_INT_EN_HI), &intr_en, sizeof(intr_en)); + intr_en |= intr_bitmap; + osal_mdc_writePciReg(unit, HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_CP_COMMON_INT_EN_HI), &intr_en, sizeof(intr_en)); + osal_giveIsrLock(&ptr_cb->intr_lock, &irq_flag); + + return (NPS_E_OK); +} + +static NPS_ERROR_NO_T +_hal_tau_pkt_disableIntr( + const UI32_T unit, + const UI32_T intr_bitmap) +{ + HAL_TAU_PKT_DRV_CB_T *ptr_cb = HAL_TAU_PKT_GET_DRV_CB_PTR(unit); + NPS_IRQ_FLAGS_T irq_flag = 0; + UI32_T intr_en = 0; + + osal_takeIsrLock(&ptr_cb->intr_lock, &irq_flag); + osal_mdc_readPciReg(unit, HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_CP_COMMON_INT_EN_HI), &intr_en, sizeof(intr_en)); + intr_en &= ~intr_bitmap; + osal_mdc_writePciReg(unit, HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_CP_COMMON_INT_EN_HI), &intr_en, sizeof(intr_en)); + osal_giveIsrLock(&ptr_cb->intr_lock, &irq_flag); + + return (NPS_E_OK); +} + +static NPS_ERROR_NO_T +_hal_tau_pkt_maskIntr( + const UI32_T unit, + const UI32_T intr_bitmap) +{ + HAL_TAU_PKT_DRV_CB_T *ptr_cb = HAL_TAU_PKT_GET_DRV_CB_PTR(unit); + NPS_IRQ_FLAGS_T irq_flag = 0; + + osal_takeIsrLock(&ptr_cb->intr_lock, &irq_flag); + osal_mdc_writePciReg(unit, 
HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_CP_COMMON_INT_MASK_CLR_HI), &intr_bitmap, sizeof(intr_bitmap)); + osal_giveIsrLock(&ptr_cb->intr_lock, &irq_flag); + + return (NPS_E_OK); +} + +static NPS_ERROR_NO_T +_hal_tau_pkt_unmaskIntr( + const UI32_T unit, + const UI32_T intr_bitmap) +{ + HAL_TAU_PKT_DRV_CB_T *ptr_cb = HAL_TAU_PKT_GET_DRV_CB_PTR(unit); + NPS_IRQ_FLAGS_T irq_flag = 0; + + osal_takeIsrLock(&ptr_cb->intr_lock, &irq_flag); + osal_mdc_writePciReg(unit, HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_CP_COMMON_INT_MASK_SET_HI), &intr_bitmap, sizeof(intr_bitmap)); + osal_giveIsrLock(&ptr_cb->intr_lock, &irq_flag); + + return (NPS_E_OK); +} + +static NPS_ERROR_NO_T +_hal_tau_pkt_dispatcher( + void *ptr_cookie) +{ + UI32_T unit = (UI32_T)((NPS_HUGE_T)ptr_cookie); + HAL_TAU_PKT_DRV_CB_T *ptr_cb = HAL_TAU_PKT_GET_DRV_CB_PTR(unit); + NPS_IRQ_FLAGS_T irq_flag = 0; + + UI32_T idx = 0, vec = sizeof(_hal_tau_pkt_intr_vec) / sizeof(HAL_TAU_PKT_INTR_VEC_T); + UI32_T intr_mask = ptr_cb->intr_bitmap; + UI32_T intr_unmask = 0; + UI32_T intr_status = 0; + + /* MASK, READ and CLEAR PKT IRQs */ + osal_takeIsrLock(&ptr_cb->intr_lock, &irq_flag); + osal_mdc_writePciReg(unit, HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_CP_COMMON_INT_MASK_CLR_HI), &intr_mask, sizeof(UI32_T)); + osal_mdc_readPciReg (unit, HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_CP_COMMON_INT_STAT_HI), &intr_status, sizeof(UI32_T)); + intr_status = intr_status & intr_mask; + osal_mdc_writePciReg(unit, HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_CP_COMMON_INT_CLR_HI), &intr_status, sizeof(UI32_T)); + osal_giveIsrLock(&ptr_cb->intr_lock, &irq_flag); + + /* Module thread handle and unmask the interrupt */ + intr_unmask = intr_status ^ intr_mask; + if (0x0 != intr_status) + { + for (idx = 0; idx < vec; idx++) + { + if (_hal_tau_pkt_intr_vec[idx].intr_reg & intr_status) + { + osal_triggerEvent(&_hal_tau_pkt_intr_vec[idx].intr_event); + _hal_tau_pkt_intr_vec[idx].intr_cnt++; + } + } + } + + /* UNMASK other PKT IRQs */ + osal_takeIsrLock(&ptr_cb->intr_lock, &irq_flag); + osal_mdc_writePciReg(unit, HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_CP_COMMON_INT_MASK_SET_HI), &intr_unmask, sizeof(UI32_T)); + osal_giveIsrLock(&ptr_cb->intr_lock, &irq_flag); + + return (NPS_E_OK); +} + +/* ----------------------------------------------------------------------------------- RW HW Regs */ +/* FUNCTION NAME: _hal_tau_pkt_startTxChannelReg + * PURPOSE: + * To issue "START" command to the target TX channel. + * INPUT: + * unit -- The unit ID + * channel -- The target TX channel + * gpd_num -- The GPD ring length of the channel + * OUTPUT: + * None. + * RETURN: + * NPS_E_OK -- Successfully configure the register. + * NPS_E_OTHERS -- Configure the register failed. + * NOTES: + * None + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_startTxChannelReg( + const UI32_T unit, + const HAL_TAU_PKT_TX_CHANNEL_T channel, + const UI32_T gpd_num) +{ + HAL_TAU_PKT_TCH_CMD_REG_T tch_cmd; + + tch_cmd.reg = 0x0; + tch_cmd.field.tch_start = 0x1; + tch_cmd.field.tch_gpd_add_no_lo = gpd_num & 0xff; + tch_cmd.field.tch_gpd_add_no_hi = (gpd_num & 0xff00) >> 8; + + osal_mdc_writePciReg(unit, + HAL_TAU_PKT_GET_PDMA_TCH_REG(HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_TCH_CMD), channel), + &tch_cmd.reg, sizeof(HAL_TAU_PKT_TCH_CMD_REG_T)); + + return (NPS_E_OK); +} + +/* FUNCTION NAME: _hal_tau_pkt_startRxChannelReg + * PURPOSE: + * To issue "START" command to the target RX channel. 
+ * INPUT: + * unit -- The unit ID + * channel -- The target RX channel + * gpd_num -- The GPD ring length of the channel + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully configure the register. + * NPS_E_OTHERS -- Configure the register failed. + * NOTES: + * None + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_startRxChannelReg( + const UI32_T unit, + const HAL_TAU_PKT_RX_CHANNEL_T channel, + const UI32_T gpd_num) +{ + HAL_TAU_PKT_RCH_CMD_REG_T rch_cmd; + + rch_cmd.reg = 0x0; + rch_cmd.field.rch_start = 0x1; + rch_cmd.field.rch_gpd_add_no_lo = gpd_num & 0xff; + rch_cmd.field.rch_gpd_add_no_hi = (gpd_num & 0xff00) >> 8; + + osal_mdc_writePciReg(unit, + HAL_TAU_PKT_GET_PDMA_RCH_REG(HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_RCH_CMD), channel), + &rch_cmd.reg, sizeof(HAL_TAU_PKT_RCH_CMD_REG_T)); + + return (NPS_E_OK); +} + +/* FUNCTION NAME: _hal_tau_pkt_resumeTxChannelReg + * PURPOSE: + * To issue "RESUME" command to the target TX channel. + * INPUT: + * unit -- The unit ID + * channel -- The target TX channel + * gpd_num -- The GPD ring length of the channel + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully configure the register. + * NPS_E_OTHERS -- Configure the register failed. + * NOTES: + * None + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_resumeTxChannelReg( + const UI32_T unit, + const HAL_TAU_PKT_TX_CHANNEL_T channel, + const UI32_T gpd_num) +{ + HAL_TAU_PKT_TCH_CMD_REG_T tch_cmd; + + tch_cmd.reg = 0x0; + tch_cmd.field.tch_resume = 0x1; + tch_cmd.field.tch_gpd_add_no_lo = gpd_num & 0xff; + tch_cmd.field.tch_gpd_add_no_hi = (gpd_num & 0xff00) >> 8; + + osal_mdc_writePciReg(unit, + HAL_TAU_PKT_GET_PDMA_TCH_REG(HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_TCH_CMD), channel), + &tch_cmd.reg, sizeof(HAL_TAU_PKT_TCH_CMD_REG_T)); + + return (NPS_E_OK); +} + +/* FUNCTION NAME: _hal_tau_pkt_resumeRxChannelReg + * PURPOSE: + * To issue "RESUME" command to the target RX channel. + * INPUT: + * unit -- The unit ID + * channel -- The target RX channel + * gpd_num -- The GPD ring length of the channel + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully configure the register. + * NPS_E_OTHERS -- Configure the register failed. + * NOTES: + * None + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_resumeRxChannelReg( + const UI32_T unit, + const HAL_TAU_PKT_RX_CHANNEL_T channel, + const UI32_T gpd_num) +{ + HAL_TAU_PKT_RCH_CMD_REG_T rch_cmd; + + rch_cmd.reg = 0x0; + rch_cmd.field.rch_resume = 0x1; + rch_cmd.field.rch_gpd_add_no_lo = gpd_num & 0xff; + rch_cmd.field.rch_gpd_add_no_hi = (gpd_num & 0xff00) >> 8; + + osal_mdc_writePciReg(unit, + HAL_TAU_PKT_GET_PDMA_RCH_REG(HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_RCH_CMD), channel), + &rch_cmd.reg, sizeof(HAL_TAU_PKT_RCH_CMD_REG_T)); + + return (NPS_E_OK); +} + +/* FUNCTION NAME: _hal_tau_pkt_stopTxChannelReg + * PURPOSE: + * To issue "STOP" command to the target TX channel. + * INPUT: + * unit -- The unit ID + * channel -- The target TX channel + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully configure the register. + * NPS_E_OTHERS -- Configure the register failed. 
+ * NOTES: + * None + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_stopTxChannelReg( + const UI32_T unit, + const HAL_TAU_PKT_TX_CHANNEL_T channel) +{ + HAL_TAU_PKT_TCH_CMD_REG_T tch_cmd; + + tch_cmd.reg = 0x0; + tch_cmd.field.tch_stop = 0x1; + + osal_mdc_writePciReg(unit, + HAL_TAU_PKT_GET_PDMA_TCH_REG(HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_TCH_CMD), channel), + &tch_cmd.reg, sizeof(HAL_TAU_PKT_TCH_CMD_REG_T)); + + return (NPS_E_OK); +} + +/* FUNCTION NAME: _hal_tau_pkt_stopRxChannelReg + * PURPOSE: + * To issue "STOP" command to the target RX channel. + * INPUT: + * unit -- The unit ID + * channel -- The target RX channel + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully configure the register. + * NPS_E_OTHERS -- Configure the register failed. + * NOTES: + * None + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_stopRxChannelReg( + const UI32_T unit, + const HAL_TAU_PKT_RX_CHANNEL_T channel) +{ + HAL_TAU_PKT_RCH_CMD_REG_T rch_cmd; + + rch_cmd.reg = 0x0; + rch_cmd.field.rch_stop = 0x1; + + osal_mdc_writePciReg(unit, + HAL_TAU_PKT_GET_PDMA_RCH_REG(HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_RCH_CMD), channel), + &rch_cmd.reg, sizeof(HAL_TAU_PKT_RCH_CMD_REG_T)); + + return (NPS_E_OK); +} + +/* ----------------------------------------------------------------------------------- Init HW Regs */ +/* FUNCTION NAME: _hal_tau_pkt_setTxGpdStartAddrReg + * PURPOSE: + * To configure the start address and the length of target GPD ring of TX channel. + * INPUT: + * unit -- The unit ID + * channel -- The target TX channel + * gpd_start_addr -- The start address of the GPD ring + * gpd_ring_sz -- The size of the GPD ring + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully configure the register. + * NPS_E_OTHERS -- Configure the register failed. + * NOTES: + * None + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_setTxGpdStartAddrReg( + const UI32_T unit, + const HAL_TAU_PKT_TX_CHANNEL_T channel, + const NPS_ADDR_T gpd_start_addr, + const UI32_T gpd_ring_sz) +{ + NPS_ERROR_NO_T rc = NPS_E_OK; + UI32_T tch_gpd_ring_start_addr_lo = 0; + UI32_T tch_gpd_ring_start_addr_hi = 0; + UI32_T tch_gpd_ring_size = 0; + + /* Configure the low 32-bit address. */ + tch_gpd_ring_start_addr_lo = (UI32_T)NPS_ADDR_64_LOW(gpd_start_addr); + + rc = osal_mdc_writePciReg(unit, + HAL_TAU_PKT_GET_PDMA_TCH_REG(HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_TCH_GPD_RING_START_ADDR_LO), channel), + &tch_gpd_ring_start_addr_lo, sizeof(UI32_T)); + + /* Configure the high 32-bit address. */ + if (NPS_E_OK == rc) + { + tch_gpd_ring_start_addr_hi = (UI32_T)NPS_ADDR_64_HI(gpd_start_addr); + + rc = osal_mdc_writePciReg(unit, + HAL_TAU_PKT_GET_PDMA_TCH_REG(HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_TCH_GPD_RING_START_ADDR_HI), channel), + &tch_gpd_ring_start_addr_hi, sizeof(UI32_T)); + } + + /* Configure the GPD ring size. */ + if (NPS_E_OK == rc) + { + tch_gpd_ring_size = gpd_ring_sz; + + rc = osal_mdc_writePciReg(unit, + HAL_TAU_PKT_GET_PDMA_TCH_REG(HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_TCH_GPD_RING_SIZE), channel), + &tch_gpd_ring_size, sizeof(UI32_T)); + } + + return (rc); +} + +/* FUNCTION NAME: _hal_tau_pkt_setRxGpdStartAddrReg + * PURPOSE: + * To configure the start address and the length of target GPD ring of RX channel. + * INPUT: + * unit -- The unit ID + * channel -- The target RX channel + * gpd_start_addr -- The start address of the GPD ring + * gpd_ring_sz -- The size of the GPD ring + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully configure the register. + * NPS_E_OTHERS -- Configure the register failed. 
+ * NOTES: + * None + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_setRxGpdStartAddrReg( + const UI32_T unit, + const HAL_TAU_PKT_RX_CHANNEL_T channel, + const NPS_ADDR_T gpd_start_addr, + const UI32_T gpd_ring_sz) +{ + NPS_ERROR_NO_T rc = NPS_E_OK; + UI32_T rch_gpd_ring_start_addr_lo = 0; + UI32_T rch_gpd_ring_start_addr_hi = 0; + UI32_T rch_gpd_ring_size = 0; + + /* Configure the low 32-bit address. */ + rch_gpd_ring_start_addr_lo = (UI32_T)NPS_ADDR_64_LOW(gpd_start_addr); + + rc = osal_mdc_writePciReg(unit, + HAL_TAU_PKT_GET_PDMA_RCH_REG(HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_RCH_GPD_RING_START_ADDR_LO), channel), + &rch_gpd_ring_start_addr_lo, sizeof(UI32_T)); + + /* Configure the high 32-bit address. */ + if (NPS_E_OK == rc) + { + rch_gpd_ring_start_addr_hi = (UI32_T)NPS_ADDR_64_HI(gpd_start_addr); + + rc = osal_mdc_writePciReg(unit, + HAL_TAU_PKT_GET_PDMA_RCH_REG(HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_RCH_GPD_RING_START_ADDR_HI), channel), + &rch_gpd_ring_start_addr_hi, sizeof(UI32_T)); + } + + /* Configure the GPD ring size. */ + if (NPS_E_OK == rc) + { + rch_gpd_ring_size = gpd_ring_sz; + + rc = osal_mdc_writePciReg(unit, + HAL_TAU_PKT_GET_PDMA_RCH_REG(HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_RCH_GPD_RING_SIZE), channel), + &rch_gpd_ring_size, sizeof(UI32_T)); + } + + return (rc); +} + +/* ----------------------------------------------------------------------------------- ISR HW Regs */ +/* FUNCTION NAME: _hal_tau_pkt_maskAllTxL2IsrReg + * PURPOSE: + * To mask all the TX L2 interrupts for the specified channel. + * INPUT: + * unit -- The unit ID + * channel -- The target TX channel + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully mask all the TX L2 interrupts. + * NOTES: + * None + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_maskAllTxL2IsrReg( + const UI32_T unit, + const HAL_TAU_PKT_TX_CHANNEL_T channel) +{ + UI32_T reg = 0; + + HAL_TAU_PKT_CLR_BITMAP(reg, + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_HWO_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_CHKSM_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_NO_OVFL_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_DMA_READ_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_BUF_SIZE_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_RUNT_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_OVSZ_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_LEN_MISMATCH_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_PKTPL_DMA_READ_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_COS_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_GT255_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_PFC | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_CREDIT_UDFL_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_DMA_WRITE_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_STOP_CMD_CPLT); + + osal_mdc_writePciReg(unit, + HAL_TAU_PKT_GET_PDMA_TCH_REG(HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_TCH_INT_MASK), channel), + ®, sizeof(UI32_T)); + + return (NPS_E_OK); +} + +/* FUNCTION NAME: _hal_tau_pkt_maskAllRxL2IsrReg + * PURPOSE: + * To mask all the L2 interrupts for the specified channel. + * INPUT: + * unit -- The unit ID + * channel -- The target RX channel + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully mask all the L2 interrupts. 
+ * NOTES: + * None + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_maskAllRxL2IsrReg( + const UI32_T unit, + const HAL_TAU_PKT_RX_CHANNEL_T channel) +{ + UI32_T reg = 0; + + HAL_TAU_PKT_CLR_BITMAP(reg, + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_AVAIL_GPD_LOW | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_AVAIL_GPD_EMPTY | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_AVAIL_GPD_ERROR | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_GPD_CHKSM_ERROR | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_DMA_READ_ERROR | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_DMA_WRITE_ERROR | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_STOP_CMD_CPLT | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_GPD_GT255_ERROR | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_TOD_UNINIT | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_PKT_ERROR_DROP | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_UDSZ_DROP | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_OVSZ_DROP | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_CMDQ_OVF_DROP | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_FIFO_OVF_DROP); + + osal_mdc_writePciReg(unit, + HAL_TAU_PKT_GET_PDMA_RCH_REG(HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_RCH_INT_MASK), channel), + ®, sizeof(UI32_T)); + + return (NPS_E_OK); +} + +/* FUNCTION NAME: _hal_tau_pkt_unmaskAllTxL2IsrReg + * PURPOSE: + * To unmask all the TX L2 interrupts for the specified channel. + * INPUT: + * unit -- The unit ID + * channel -- The target TX channel + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully unmask all the TX L2 interrupts. + * NOTES: + * None + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_unmaskAllTxL2IsrReg( + const UI32_T unit, + const HAL_TAU_PKT_TX_CHANNEL_T channel) +{ + UI32_T reg = 0; + + HAL_TAU_PKT_SET_BITMAP(reg, + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_HWO_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_CHKSM_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_NO_OVFL_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_DMA_READ_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_BUF_SIZE_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_RUNT_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_OVSZ_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_LEN_MISMATCH_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_PKTPL_DMA_READ_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_COS_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_GT255_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_PFC | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_CREDIT_UDFL_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_DMA_WRITE_ERROR | + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_STOP_CMD_CPLT); + + osal_mdc_writePciReg(unit, + HAL_TAU_PKT_GET_PDMA_TCH_REG(HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_TCH_INT_MASK), channel), + ®, sizeof(UI32_T)); + + return (NPS_E_OK); +} + +/* FUNCTION NAME: _hal_tau_pkt_unmaskAllRxL2IsrReg + * PURPOSE: + * To unmask all the L2 interrupts for the specified channel. + * INPUT: + * unit -- The unit ID + * channel -- The target RX channel + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully unmask all the L2 interrupts. 
+ * NOTES: + * None + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_unmaskAllRxL2IsrReg( + const UI32_T unit, + const HAL_TAU_PKT_RX_CHANNEL_T channel) +{ + UI32_T reg = 0; + + HAL_TAU_PKT_SET_BITMAP(reg, + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_AVAIL_GPD_LOW | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_AVAIL_GPD_EMPTY | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_AVAIL_GPD_ERROR | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_GPD_CHKSM_ERROR | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_DMA_READ_ERROR | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_DMA_WRITE_ERROR | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_STOP_CMD_CPLT | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_GPD_GT255_ERROR | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_TOD_UNINIT | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_PKT_ERROR_DROP | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_UDSZ_DROP | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_OVSZ_DROP | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_CMDQ_OVF_DROP | + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_FIFO_OVF_DROP); + + osal_mdc_writePciReg(unit, + HAL_TAU_PKT_GET_PDMA_RCH_REG(HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_RCH_INT_MASK), channel), + ®, sizeof(UI32_T)); + + return (NPS_E_OK); +} + +/* FUNCTION NAME: _hal_tau_pkt_clearTxL2IsrStatusReg + * PURPOSE: + * To clear the status of TX L2 interrupts for the specified channel. + * INPUT: + * unit -- The unit ID + * channel -- The target TX channel + * isr_bitmap -- The bitmap used to specify the target ISRs + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully clear L1 ISR status. + * NOTES: + * None + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_clearTxL2IsrStatusReg( + const UI32_T unit, + const HAL_TAU_PKT_TX_CHANNEL_T channel, + const HAL_TAU_PKT_TX_CHANNEL_L2_ISR_T isr_bitmap) +{ + UI32_T reg = 0; + + HAL_TAU_PKT_SET_BITMAP(reg, isr_bitmap); + + osal_mdc_writePciReg(unit, + HAL_TAU_PKT_GET_PDMA_TCH_REG(HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_TCH_INT_CLR), channel), + ®, sizeof(UI32_T)); + + return (NPS_E_OK); +} + +/* FUNCTION NAME: _hal_tau_pkt_clearRxL2IsrStatusReg + * PURPOSE: + * To clear the status of RX L2 interrupts for the specified channel. + * INPUT: + * unit -- The unit ID + * channel -- The target RX channel + * isr_bitmap -- The bitmap used to specify the target ISRs + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully clear RX L2 ISR status. + * NOTES: + * None + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_clearRxL2IsrStatusReg( + const UI32_T unit, + const HAL_TAU_PKT_RX_CHANNEL_T channel, + const HAL_TAU_PKT_RX_CHANNEL_L2_ISR_T isr_bitmap) +{ + UI32_T reg = 0; + + HAL_TAU_PKT_SET_BITMAP(reg, isr_bitmap); + + osal_mdc_writePciReg(unit, + HAL_TAU_PKT_GET_PDMA_RCH_REG(HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_RCH_INT_CLR), channel), + ®, sizeof(UI32_T)); + + return (NPS_E_OK); +} + +/* FUNCTION NAME: hal_tau_pkt_getTxIntrCnt + * PURPOSE: + * To get the PDMA TX interrupt counters of the target channel. + * INPUT: + * unit -- The unit ID + * channel -- The target channel + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully get the counters. + * NOTES: + * None + */ +NPS_ERROR_NO_T +hal_tau_pkt_getTxIntrCnt( + const UI32_T unit, + const UI32_T channel, + UI32_T *ptr_intr_cnt) +{ + *ptr_intr_cnt = HAL_TAU_PKT_TCH_CNT(unit, channel); + return (NPS_E_OK); +} + +/* FUNCTION NAME: hal_tau_pkt_getRxIntrCnt + * PURPOSE: + * To get the PDMA RX interrupt counters of the target channel. + * INPUT: + * unit -- The unit ID + * channel -- The target channel + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully get the counters. 
+ * NOTES: + * None + */ +NPS_ERROR_NO_T +hal_tau_pkt_getRxIntrCnt( + const UI32_T unit, + const UI32_T channel, + UI32_T *ptr_intr_cnt) +{ + *ptr_intr_cnt = HAL_TAU_PKT_RCH_CNT(unit, channel); + return (NPS_E_OK); +} + +/* FUNCTION NAME: hal_tau_pkt_getTxKnlCnt + * PURPOSE: + * To get the PDMA TX counters of the target channel. + * INPUT: + * unit -- The unit ID + * ptr_cookie -- Pointer of the TX cookie + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully get the counters. + * NOTES: + * None + */ +NPS_ERROR_NO_T +hal_tau_pkt_getTxKnlCnt( + const UI32_T unit, + HAL_TAU_PKT_IOCTL_CH_CNT_COOKIE_T *ptr_cookie) +{ + HAL_TAU_PKT_TX_CB_T *ptr_tx_cb = HAL_TAU_PKT_GET_TX_CB_PTR(unit); + + osal_io_copyToUser(&ptr_cookie->tx_cnt, &ptr_tx_cb->cnt, sizeof(HAL_TAU_PKT_TX_CNT_T)); + return (NPS_E_OK); +} + +/* FUNCTION NAME: hal_tau_pkt_getRxKnlCnt + * PURPOSE: + * To get the PDMA RX counters of the target channel. + * INPUT: + * unit -- The unit ID + * ptr_cookie -- Pointer of the RX cookie + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully get the counters. + * NOTES: + * None + */ +NPS_ERROR_NO_T +hal_tau_pkt_getRxKnlCnt( + const UI32_T unit, + HAL_TAU_PKT_IOCTL_CH_CNT_COOKIE_T *ptr_cookie) +{ + HAL_TAU_PKT_RX_CB_T *ptr_rx_cb = HAL_TAU_PKT_GET_RX_CB_PTR(unit); + + osal_io_copyToUser(&ptr_cookie->rx_cnt, &ptr_rx_cb->cnt, sizeof(HAL_TAU_PKT_RX_CNT_T)); + return (NPS_E_OK); +} + +/* FUNCTION NAME: hal_tau_pkt_clearTxKnlCnt + * PURPOSE: + * To clear the PDMA TX counters of the target channel. + * INPUT: + * unit -- The unit ID + * ptr_cookie -- Pointer of the TX cookie + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully clear the counters. + * NOTES: + * None + */ +NPS_ERROR_NO_T +hal_tau_pkt_clearTxKnlCnt( + const UI32_T unit, + HAL_TAU_PKT_IOCTL_TX_COOKIE_T *ptr_cookie) +{ + HAL_TAU_PKT_TX_CB_T *ptr_tx_cb = HAL_TAU_PKT_GET_TX_CB_PTR(unit); + + osal_memset(&ptr_tx_cb->cnt, 0, sizeof(HAL_TAU_PKT_TX_CNT_T)); + return (NPS_E_OK); +} + +/* FUNCTION NAME: hal_tau_pkt_clearRxKnlCnt + * PURPOSE: + * To clear the PDMA RX counters of the target channel. + * INPUT: + * unit -- The unit ID + * ptr_cookie -- Pointer of the RX cookie + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully clear the counters. + * NOTES: + * None + */ +NPS_ERROR_NO_T +hal_tau_pkt_clearRxKnlCnt( + const UI32_T unit, + HAL_TAU_PKT_IOCTL_RX_COOKIE_T *ptr_cookie) +{ + HAL_TAU_PKT_RX_CB_T *ptr_rx_cb = HAL_TAU_PKT_GET_RX_CB_PTR(unit); + + osal_memset(&ptr_rx_cb->cnt, 0, sizeof(HAL_TAU_PKT_RX_CNT_T)); + return (NPS_E_OK); +} + +/* FUNCTION NAME: hal_tau_pkt_setPortAttr + * PURPOSE: + * To set the port attributes such as status or speeds. + * INPUT: + * unit -- The unit ID + * ptr_cookie -- Pointer of the Port cookie + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully set the attributes. 
+ * NOTES:
+ *      None
+ */
+NPS_ERROR_NO_T
+hal_tau_pkt_setPortAttr(
+    const UI32_T                        unit,
+    HAL_TAU_PKT_IOCTL_PORT_COOKIE_T     *ptr_cookie)
+{
+#define HAL_TAU_PKT_PORT_STATUS_UP      (1)
+#define HAL_TAU_PKT_PORT_STATUS_DOWN    (0)
+    struct net_device       *ptr_net_dev;
+    struct net_device_priv  *ptr_priv;
+    UI32_T                  port;
+    UI32_T                  status;
+    NPS_PORT_SPEED_T        speed;
+
+    osal_io_copyFromUser(&port,   &ptr_cookie->port,   sizeof(UI32_T));
+    osal_io_copyFromUser(&status, &ptr_cookie->status, sizeof(UI32_T));
+    osal_io_copyFromUser(&speed,  &ptr_cookie->speed,  sizeof(NPS_PORT_SPEED_T));
+
+    ptr_net_dev = HAL_TAU_PKT_GET_PORT_NETDEV(port);
+    if ((NULL != ptr_net_dev) && (port < HAL_TAU_PKT_MAX_PORT_NUM))
+    {
+        ptr_priv = netdev_priv(ptr_net_dev);
+
+        /* Update the carrier status. */
+        if (HAL_TAU_PKT_PORT_STATUS_UP == status)
+        {
+            netif_carrier_on(ptr_net_dev);
+        }
+        else
+        {
+            netif_carrier_off(ptr_net_dev);
+        }
+
+        /* Update the link speed. */
+        switch (speed)
+        {
+            case NPS_PORT_SPEED_1G:
+                ptr_priv->speed = SPEED_1000;
+                break;
+            case NPS_PORT_SPEED_10G:
+                ptr_priv->speed = SPEED_10000;
+                break;
+            case NPS_PORT_SPEED_25G:
+                ptr_priv->speed = 25000;
+                break;
+            case NPS_PORT_SPEED_40G:
+                ptr_priv->speed = 40000;
+                break;
+            case NPS_PORT_SPEED_50G:
+                ptr_priv->speed = 50000;
+                break;
+            case NPS_PORT_SPEED_100G:
+                ptr_priv->speed = 100000;
+                break;
+            default:
+                break;
+        }
+    }
+    return (NPS_E_OK);
+}
+
+
+/* ----------------------------------------------------------------------------------- independent func */
+/* FUNCTION NAME: _hal_tau_pkt_enQueue
+ * PURPOSE:
+ *      To enqueue the target data.
+ * INPUT:
+ *      ptr_que     -- Pointer for the target queue
+ *      ptr_data    -- Pointer for the data to be enqueued
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK    -- Successfully enqueue the data.
+ * NOTES:
+ *      None
+ */
+static NPS_ERROR_NO_T
+_hal_tau_pkt_enQueue(
+    HAL_TAU_PKT_SW_QUEUE_T  *ptr_que,
+    void                    *ptr_data)
+{
+    NPS_ERROR_NO_T rc = NPS_E_OK;
+
+    osal_takeSemaphore(&ptr_que->sema, NPS_SEMAPHORE_WAIT_FOREVER);
+    rc = osal_que_enque(&ptr_que->que_id, ptr_data);
+    osal_giveSemaphore(&ptr_que->sema);
+
+    return (rc);
+}
+
+/* FUNCTION NAME: _hal_tau_pkt_deQueue
+ * PURPOSE:
+ *      To dequeue the target data.
+ * INPUT:
+ *      ptr_que      -- Pointer for the target queue
+ *      pptr_data    -- Pointer for the data pointer to be dequeued
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK     -- Successfully dequeue the data.
+ * NOTES:
+ *      None
+ */
+static NPS_ERROR_NO_T
+_hal_tau_pkt_deQueue(
+    HAL_TAU_PKT_SW_QUEUE_T  *ptr_que,
+    void                    **pptr_data)
+{
+    NPS_ERROR_NO_T rc = NPS_E_OK;
+
+    osal_takeSemaphore(&ptr_que->sema, NPS_SEMAPHORE_WAIT_FOREVER);
+    rc = osal_que_deque(&ptr_que->que_id, pptr_data);
+    osal_giveSemaphore(&ptr_que->sema);
+
+    return (rc);
+}
+
+/* FUNCTION NAME: _hal_tau_pkt_getQueueCount
+ * PURPOSE:
+ *      To obtain the current data count in the target queue.
+ * INPUT:
+ *      ptr_que      -- Pointer for the target queue
+ *      ptr_count    -- Pointer for the data count
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK             -- Successfully obtain the count.
+ *      NPS_E_BAD_PARAMETER  -- Parameter pointer is null.
+ * NOTES:
+ *      None
+ */
+static NPS_ERROR_NO_T
+_hal_tau_pkt_getQueueCount(
+    HAL_TAU_PKT_SW_QUEUE_T  *ptr_que,
+    UI32_T                  *ptr_count)
+{
+    NPS_ERROR_NO_T rc = NPS_E_OK;
+
+    osal_takeSemaphore(&ptr_que->sema, NPS_SEMAPHORE_WAIT_FOREVER);
+    osal_que_getCount(&ptr_que->que_id, ptr_count);
+    osal_giveSemaphore(&ptr_que->sema);
+
+    return (rc);
+}
+
+/* FUNCTION NAME: _hal_tau_pkt_allocRxPayloadBuf
+ * PURPOSE:
+ *      To allocate the RX packet payload buffer for the GPD.
+ * INPUT:
+ *      unit       -- The unit ID
+ *      channel    -- The target RX channel
+ *      gpd_idx    -- The current GPD index
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK         -- Successfully allocate the buffer.
+ *      NPS_E_NO_MEMORY  -- Allocate the buffer failed.
+ * NOTES: + * None + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_allocRxPayloadBuf( + const UI32_T unit, + const UI32_T channel, + const UI32_T gpd_idx) +{ + NPS_ERROR_NO_T rc = NPS_E_NO_MEMORY; + HAL_TAU_PKT_RX_CB_T *ptr_rx_cb = HAL_TAU_PKT_GET_RX_CB_PTR(unit); + volatile HAL_TAU_PKT_RX_GPD_T *ptr_rx_gpd = HAL_TAU_PKT_GET_RX_GPD_PTR(unit, channel, gpd_idx); + NPS_ADDR_T phy_addr = 0; + + HAL_TAU_PKT_RX_PDMA_T *ptr_rx_pdma = HAL_TAU_PKT_GET_RX_PDMA_PTR(unit, channel); + struct sk_buff *ptr_skb = NULL; + + ptr_skb = osal_skb_alloc(ptr_rx_cb->buf_len); + if (NULL != ptr_skb) + { + /* map skb to dma */ + phy_addr = osal_skb_mapDma(ptr_skb, DMA_FROM_DEVICE); + if (0x0 == phy_addr) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_ERR, + "u=%u, rxch=%u, skb dma map err, size=%u\n", + unit, channel, ptr_skb->len); + osal_skb_free(ptr_skb); + rc = NPS_E_NO_MEMORY; + } + else + { + ptr_rx_pdma->pptr_skb_ring[gpd_idx] = ptr_skb; + rc = NPS_E_OK; + } + } + + if (NPS_E_OK == rc) + { + ptr_rx_gpd->data_buf_addr_hi = NPS_ADDR_64_HI(phy_addr); + ptr_rx_gpd->data_buf_addr_lo = NPS_ADDR_64_LOW(phy_addr); + ptr_rx_gpd->avbl_buf_len = ptr_rx_cb->buf_len; + } + + return (rc); +} + +/* FUNCTION NAME: _hal_tau_pkt_freeRxPayloadBuf + * PURPOSE: + * To free the RX packet payload buffer for the GPD. + * INPUT: + * unit -- The unit ID + * channel -- The target RX channel + * gpd_idx -- The current GPD index + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully free the buffer. + * NOTES: + * None + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_freeRxPayloadBuf( + const UI32_T unit, + const UI32_T channel, + const UI32_T gpd_idx) +{ + NPS_ERROR_NO_T rc = NPS_E_OTHERS; + volatile HAL_TAU_PKT_RX_GPD_T *ptr_rx_gpd = HAL_TAU_PKT_GET_RX_GPD_PTR(unit, channel, gpd_idx); + NPS_ADDR_T phy_addr = 0; + + HAL_TAU_PKT_RX_PDMA_T *ptr_rx_pdma = HAL_TAU_PKT_GET_RX_PDMA_PTR(unit, channel); + struct sk_buff *ptr_skb = NULL; + + phy_addr = NPS_ADDR_32_TO_64(ptr_rx_gpd->data_buf_addr_hi, ptr_rx_gpd->data_buf_addr_lo); + if (0x0 != phy_addr) + { + /* unmap dma */ + ptr_skb = ptr_rx_pdma->pptr_skb_ring[gpd_idx]; + osal_skb_unmapDma(phy_addr, ptr_skb->len, DMA_FROM_DEVICE); + osal_skb_free(ptr_skb); + rc = NPS_E_OK; + } + + if (NPS_E_OK == rc) + { + ptr_rx_gpd->data_buf_addr_hi = 0x0; + ptr_rx_gpd->data_buf_addr_lo = 0x0; + } + + return (rc); +} + +/* FUNCTION NAME: _hal_tau_pkt_freeRxPayloadBufGpd + * PURPOSE: + * To free the RX packet payload buffer for the GPD. + * INPUT: + * unit -- The unit ID + * ptr_sw_gpd -- The pointer of RX SW GPD + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully free the buffer. + * NOTES: + * None + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_freeRxPayloadBufGpd( + const UI32_T unit, + HAL_TAU_PKT_RX_SW_GPD_T *ptr_sw_gpd) +{ + NPS_ERROR_NO_T rc = NPS_E_OTHERS; + NPS_ADDR_T phy_addr = 0; + + struct sk_buff *ptr_skb = NULL; + + phy_addr = NPS_ADDR_32_TO_64(ptr_sw_gpd->rx_gpd.data_buf_addr_hi, ptr_sw_gpd->rx_gpd.data_buf_addr_lo); + if (0x0 != phy_addr) + { + ptr_skb = ptr_sw_gpd->ptr_cookie; + osal_skb_free(ptr_skb); + rc = NPS_E_OK; + } + + return (rc); +} + +/* FUNCTION NAME: _hal_tau_initTxPdmaRing + * PURPOSE: + * To initialize the GPD ring of target TX channel. + * + * INPUT: + * unit -- The unit ID + * channel -- The target TX channel + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully initialize the GPD ring. 
+ * NOTES:
+ *      None
+ */
+static NPS_ERROR_NO_T
+_hal_tau_pkt_initTxPdmaRing(
+    const UI32_T                    unit,
+    const HAL_TAU_PKT_TX_CHANNEL_T  channel)
+{
+    NPS_ERROR_NO_T                  rc = NPS_E_OK;
+    HAL_TAU_PKT_TX_PDMA_T           *ptr_tx_pdma = HAL_TAU_PKT_GET_TX_PDMA_PTR(unit, channel);
+    volatile HAL_TAU_PKT_TX_GPD_T   *ptr_tx_gpd = NULL;
+    NPS_ADDR_T                      phy_addr = 0;
+    UI32_T                          gpd_idx = 0;
+
+    for (gpd_idx = 0; gpd_idx < ptr_tx_pdma->gpd_num; gpd_idx++)
+    {
+        ptr_tx_gpd = HAL_TAU_PKT_GET_TX_GPD_PTR(unit, channel, gpd_idx);
+        osal_memset((void *)ptr_tx_gpd, 0x0, sizeof(HAL_TAU_PKT_TX_GPD_T));
+        ptr_tx_gpd->ioc = HAL_TAU_PKT_IOC_HAS_INTR;
+        ptr_tx_gpd->ch  = HAL_TAU_PKT_CH_LAST_GPD;
+        ptr_tx_gpd->hwo = HAL_TAU_PKT_HWO_SW_OWN;
+        osal_dma_flushCache((void *)ptr_tx_gpd, sizeof(HAL_TAU_PKT_TX_GPD_T));
+    }
+
+    phy_addr = osal_dma_convertVirtToPhy(ptr_tx_pdma->ptr_gpd_align_start_addr);
+    rc = _hal_tau_pkt_setTxGpdStartAddrReg(unit, channel, phy_addr, ptr_tx_pdma->gpd_num);
+
+    return (rc);
+}
+
+/* FUNCTION NAME: _hal_tau_pkt_initRxPdmaRing
+ * PURPOSE:
+ *      To initialize the RX GPD ring.
+ * INPUT:
+ *      unit       -- The target unit
+ *      channel    -- The target RX channel
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK   -- Successfully initialize the RX GPD ring.
+ * NOTES:
+ *      None
+ */
+static NPS_ERROR_NO_T
+_hal_tau_pkt_initRxPdmaRing(
+    const UI32_T                    unit,
+    const HAL_TAU_PKT_RX_CHANNEL_T  channel)
+{
+    NPS_ERROR_NO_T                  rc = NPS_E_OK;
+    HAL_TAU_PKT_RX_PDMA_T           *ptr_rx_pdma = HAL_TAU_PKT_GET_RX_PDMA_PTR(unit, channel);
+    volatile HAL_TAU_PKT_RX_GPD_T   *ptr_rx_gpd = NULL;
+    NPS_ADDR_T                      phy_addr = 0;
+    UI32_T                          gpd_idx = 0;
+
+    for (gpd_idx = 0; gpd_idx < ptr_rx_pdma->gpd_num; gpd_idx++)
+    {
+        ptr_rx_gpd = HAL_TAU_PKT_GET_RX_GPD_PTR(unit, channel, gpd_idx);
+        osal_memset((void *)ptr_rx_gpd, 0x0, sizeof(HAL_TAU_PKT_RX_GPD_T));
+        ptr_rx_gpd->ioc = HAL_TAU_PKT_IOC_NO_INTR;
+        ptr_rx_gpd->hwo = HAL_TAU_PKT_HWO_SW_OWN;
+        osal_dma_flushCache((void *)ptr_rx_gpd, sizeof(HAL_TAU_PKT_RX_GPD_T));
+    }
+
+    phy_addr = osal_dma_convertVirtToPhy(ptr_rx_pdma->ptr_gpd_align_start_addr);
+    rc = _hal_tau_pkt_setRxGpdStartAddrReg(unit, channel, phy_addr, ptr_rx_pdma->gpd_num);
+
+    return (rc);
+}
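The HWO bit written above is the ownership handshake with the PDMA engine: software hands a descriptor to hardware as HW_OWN and may only touch it again after hardware clears that bit. A minimal sketch of the reclaim side, assuming the same osal cache helpers used above (the function name is illustrative; the real reclaim logic lives in the Rx-done path elsewhere in this file):

    /* Illustrative fragment: reclaim one RX GPD once hardware is done with it. */
    static void
    _rx_gpd_reclaim_sketch(volatile HAL_TAU_PKT_RX_GPD_T *ptr_rx_gpd)
    {
        osal_dma_invalidateCache((void *)ptr_rx_gpd, sizeof(HAL_TAU_PKT_RX_GPD_T));
        if (HAL_TAU_PKT_HWO_HW_OWN == ptr_rx_gpd->hwo)
        {
            return;                                  /* hardware still owns it */
        }
        /* ... process the packet attached to this GPD ... */
        ptr_rx_gpd->hwo = HAL_TAU_PKT_HWO_HW_OWN;    /* hand it back to hardware */
        osal_dma_flushCache((void *)ptr_rx_gpd, sizeof(HAL_TAU_PKT_RX_GPD_T));
    }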
+/* FUNCTION NAME: _hal_tau_pkt_initRxPdmaRingBuf
+ * PURPOSE:
+ *      To initialize the Rx PDMA ring buffers.
+ * INPUT:
+ *      unit       -- The unit ID
+ *      channel    -- The target RX channel
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK   -- Successfully initialize the Rx PDMA ring buffers.
+ * NOTES:
+ *      None
+ */
+static NPS_ERROR_NO_T
+_hal_tau_pkt_initRxPdmaRingBuf(
+    const UI32_T                    unit,
+    const HAL_TAU_PKT_RX_CHANNEL_T  channel)
+{
+    NPS_ERROR_NO_T                  rc = NPS_E_OK;
+    HAL_TAU_PKT_RX_CB_T             *ptr_rx_cb = HAL_TAU_PKT_GET_RX_CB_PTR(unit);
+    HAL_TAU_PKT_RX_PDMA_T           *ptr_rx_pdma = HAL_TAU_PKT_GET_RX_PDMA_PTR(unit, channel);
+    volatile HAL_TAU_PKT_RX_GPD_T   *ptr_rx_gpd = NULL;
+    UI32_T                          gpd_idx = 0;
+
+    if (0 == ptr_rx_cb->buf_len)
+    {
+        return (NPS_E_BAD_PARAMETER);
+    }
+
+    for (gpd_idx = 0; gpd_idx < ptr_rx_pdma->gpd_num; gpd_idx++)
+    {
+        ptr_rx_gpd = HAL_TAU_PKT_GET_RX_GPD_PTR(unit, channel, gpd_idx);
+        osal_dma_invalidateCache((void *)ptr_rx_gpd, sizeof(HAL_TAU_PKT_RX_GPD_T));
+
+        rc = _hal_tau_pkt_allocRxPayloadBuf(unit, channel, gpd_idx);
+        if (NPS_E_OK == rc)
+        {
+            ptr_rx_gpd->ioc = HAL_TAU_PKT_IOC_HAS_INTR;
+            ptr_rx_gpd->hwo = HAL_TAU_PKT_HWO_HW_OWN;
+            osal_dma_flushCache((void *)ptr_rx_gpd, sizeof(HAL_TAU_PKT_RX_GPD_T));
+        }
+        else
+        {
+            ptr_rx_cb->cnt.no_memory++;
+            break;
+        }
+    }
+
+    return (rc);
+}
+
+/* FUNCTION NAME: _hal_tau_pkt_deinitRxPdmaRingBuf
+ * PURPOSE:
+ *      To de-init the Rx PDMA ring configuration.
+ * INPUT:
+ *      unit       -- The unit ID
+ *      channel    -- The target RX channel
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK   -- Successfully de-init the Rx PDMA ring.
+ * NOTES:
+ *      None
+ */
+static NPS_ERROR_NO_T
+_hal_tau_pkt_deinitRxPdmaRingBuf(
+    const UI32_T                    unit,
+    const HAL_TAU_PKT_RX_CHANNEL_T  channel)
+{
+    NPS_ERROR_NO_T                  rc = NPS_E_OK;
+    HAL_TAU_PKT_RX_PDMA_T           *ptr_rx_pdma = HAL_TAU_PKT_GET_RX_PDMA_PTR(unit, channel);
+    volatile HAL_TAU_PKT_RX_GPD_T   *ptr_rx_gpd = NULL;
+    UI32_T                          gpd_idx = 0;
+
+    for (gpd_idx = 0; ((gpd_idx < ptr_rx_pdma->gpd_num) && (NPS_E_OK == rc)); gpd_idx++)
+    {
+        /* mark the GPD as invalid to prevent the Rx-done task from processing it */
+        ptr_rx_gpd = HAL_TAU_PKT_GET_RX_GPD_PTR(unit, channel, gpd_idx);
+        ptr_rx_gpd->hwo = HAL_TAU_PKT_HWO_HW_OWN;
+        osal_dma_flushCache((void *)ptr_rx_gpd, sizeof(HAL_TAU_PKT_RX_GPD_T));
+
+        rc = _hal_tau_pkt_freeRxPayloadBuf(unit, channel, gpd_idx);
+    }
+    return (rc);
+}
+
+/* FUNCTION NAME: _hal_tau_pkt_recoverTxPdma
+ * PURPOSE:
+ *      To recover the TX PDMA status to the initial state.
+ * INPUT:
+ *      unit       -- The unit ID
+ *      channel    -- The target TX channel
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK   -- Successfully recover the PDMA.
+ * NOTES:
+ *      None
+ */
+static NPS_ERROR_NO_T
+_hal_tau_pkt_recoverTxPdma(
+    const UI32_T                    unit,
+    const HAL_TAU_PKT_TX_CHANNEL_T  channel)
+{
+    NPS_ERROR_NO_T          rc = NPS_E_OK;
+    HAL_TAU_PKT_TX_PDMA_T   *ptr_tx_pdma = HAL_TAU_PKT_GET_TX_PDMA_PTR(unit, channel);
+
+    /* Release the software GPD ring and configure it again. */
+    ptr_tx_pdma->used_idx = 0;
+    ptr_tx_pdma->free_idx = 0;
+    ptr_tx_pdma->used_gpd_num = 0;
+    ptr_tx_pdma->free_gpd_num = ptr_tx_pdma->gpd_num;
+
+    _hal_tau_pkt_stopTxChannelReg(unit, channel);
+    rc = _hal_tau_pkt_initTxPdmaRing(unit, channel);
+    _hal_tau_pkt_startTxChannelReg(unit, channel, 0);
+
+    return (rc);
+}
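How the recovery above gets triggered is outside this excerpt; a plausible call site in the error-handling task might look like the following (illustrative only, with `unit` and `channel` assumed to be in scope):

    HAL_TAU_PKT_TX_PDMA_T *ptr_tx_pdma = HAL_TAU_PKT_GET_TX_PDMA_PTR(unit, channel);

    if (TRUE == ptr_tx_pdma->err_flag)
    {
        /* Reset the ring indices, re-init the descriptors, restart the channel. */
        _hal_tau_pkt_recoverTxPdma(unit, channel);
        ptr_tx_pdma->err_flag = FALSE;
    }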
+     */
+    ptr_rx_pdma->cur_idx = 0;
+
+    _hal_tau_pkt_stopRxChannelReg(unit, channel);
+    rc = _hal_tau_pkt_deinitRxPdmaRingBuf(unit, channel);
+    if (NPS_E_OK != rc)
+    {
+        return (rc);
+    }
+    rc = _hal_tau_pkt_initRxPdmaRing(unit, channel);
+    if (NPS_E_OK != rc)
+    {
+        return (rc);
+    }
+    rc = _hal_tau_pkt_initRxPdmaRingBuf(unit, channel);
+    if (NPS_E_OK != rc)
+    {
+        return (rc);
+    }
+    _hal_tau_pkt_startRxChannelReg(unit, channel, ptr_rx_pdma->gpd_num);
+
+    return (rc);
+}
+
+/* FUNCTION NAME: _hal_tau_pkt_freeTxGpdList
+ * PURPOSE:
+ *      To free the TX SW GPD linked list.
+ * INPUT:
+ *      unit        -- The unit ID
+ *      ptr_sw_gpd  -- The pointer of TX SW GPD
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      None
+ * NOTES:
+ *      None
+ */
+static void
+_hal_tau_pkt_freeTxGpdList(
+    UI32_T                      unit,
+    HAL_TAU_PKT_TX_SW_GPD_T     *ptr_sw_gpd)
+{
+    HAL_TAU_PKT_TX_SW_GPD_T     *ptr_sw_gpd_cur = NULL;
+
+    while (NULL != ptr_sw_gpd)
+    {
+        ptr_sw_gpd_cur = ptr_sw_gpd;
+        ptr_sw_gpd = ptr_sw_gpd->ptr_next;
+        osal_free(ptr_sw_gpd_cur);
+    }
+}
+
+/* FUNCTION NAME: _hal_tau_pkt_freeRxGpdList
+ * PURPOSE:
+ *      To free the RX SW GPD linked list.
+ * INPUT:
+ *      unit         -- The unit ID
+ *      ptr_sw_gpd   -- The pointer of RX SW GPD
+ *      free_payload -- TRUE: To free the buf in SDK, FALSE: in user process.
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK    -- Successfully free the GPD list.
+ * NOTES:
+ *      None
+ */
+static NPS_ERROR_NO_T
+_hal_tau_pkt_freeRxGpdList(
+    UI32_T                      unit,
+    HAL_TAU_PKT_RX_SW_GPD_T     *ptr_sw_gpd,
+    BOOL_T                      free_payload)
+{
+    HAL_TAU_PKT_RX_SW_GPD_T     *ptr_sw_gpd_cur = NULL;
+
+    while (NULL != ptr_sw_gpd)
+    {
+        ptr_sw_gpd_cur = ptr_sw_gpd;
+        ptr_sw_gpd = ptr_sw_gpd->ptr_next;
+        if (TRUE == free_payload)
+        {
+            _hal_tau_pkt_freeRxPayloadBufGpd(unit, ptr_sw_gpd_cur);
+        }
+        osal_free(ptr_sw_gpd_cur);
+    }
+
+    return (NPS_E_OK);
+}
+
+/* ----------------------------------------------------------------------------------- pkt_drv */
+/* FUNCTION NAME: _hal_tau_pkt_txEnQueueBulk
+ * PURPOSE:
+ *      To enqueue a number of packets from the bulk buffer.
+ * INPUT:
+ *      unit        -- The unit ID
+ *      channel     -- The target channel
+ *      number      -- The number of packets to be enqueued
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      None
+ * NOTES:
+ *      None
+ */
+static void
+_hal_tau_pkt_txEnQueueBulk(
+    const UI32_T    unit,
+    const UI32_T    channel,
+    const UI32_T    number)
+{
+    HAL_TAU_PKT_TX_PDMA_T       *ptr_tx_pdma = HAL_TAU_PKT_GET_TX_PDMA_PTR(unit, channel);
+    HAL_TAU_PKT_TX_SW_GPD_T     *ptr_sw_gpd = NULL;
+    UI32_T                      idx;
+
+    for (idx = 0; idx < number; idx++)
+    {
+        ptr_sw_gpd = ptr_tx_pdma->pptr_sw_gpd_bulk[idx];
+        ptr_tx_pdma->pptr_sw_gpd_bulk[idx] = NULL;
+        if (NULL != ptr_sw_gpd->callback)
+        {
+            ptr_sw_gpd->callback(unit, ptr_sw_gpd, ptr_sw_gpd->ptr_cookie);
+        }
+    }
+}
+
+
+/* FUNCTION NAME: _hal_tau_pkt_strictTxDeQueue
+ * PURPOSE:
+ *      To dequeue the packets based on the strict algorithm.
+ * INPUT:
+ *      unit        -- The unit ID
+ *      ptr_cookie  -- Pointer of the TX cookie
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK    -- Successfully dequeue the packets.
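+ * EXAMPLE:
+ *      Sketch of the intended consumer (a hypothetical user-space loop; the
+ *      ioctl request name below is illustrative, not defined in this file):
+ *
+ *          HAL_TAU_PKT_IOCTL_TX_COOKIE_T   cookie;
+ *
+ *          while (running)
+ *          {
+ *              rc = ioctl(fd, NPS_IOCTL_TX_DEQUE_WAIT, &cookie);
+ *              -- on success, cookie.done_sw_gpd_addr holds the address of
+ *              -- the user-space SW GPD whose transmission has completed
+ *          }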
+ * NOTES:
+ *      None
+ */
+static NPS_ERROR_NO_T
+_hal_tau_pkt_strictTxDeQueue(
+    const UI32_T                    unit,
+    HAL_TAU_PKT_IOCTL_TX_COOKIE_T   *ptr_cookie)
+{
+    NPS_ERROR_NO_T              rc = NPS_E_OK;
+    HAL_TAU_PKT_TX_CB_T         *ptr_tx_cb = HAL_TAU_PKT_GET_TX_CB_PTR(unit);
+    HAL_TAU_PKT_TX_SW_GPD_T     *ptr_sw_gpd = NULL;
+    NPS_ADDR_T                  sw_gpd_addr;
+    UI32_T                      que_cnt = 0;
+
+    /* get queue count */
+    _hal_tau_pkt_getQueueCount(&ptr_tx_cb->sw_queue, &que_cnt);
+
+    /* wait txTask event */
+    if (0 == que_cnt)
+    {
+        osal_waitEvent(&ptr_tx_cb->sync_sema);
+        if (FALSE == ptr_tx_cb->running)
+        {
+            return (NPS_E_OTHERS); /* deinit */
+        }
+
+        ptr_tx_cb->cnt.wait_event++;
+
+        /* re-get queue count */
+        _hal_tau_pkt_getQueueCount(&ptr_tx_cb->sw_queue, &que_cnt);
+    }
+
+    /* deque */
+    if (que_cnt > 0)
+    {
+        rc = _hal_tau_pkt_deQueue(&ptr_tx_cb->sw_queue, (void **)&ptr_sw_gpd);
+        if (NPS_E_OK == rc)
+        {
+            ptr_tx_cb->cnt.deque_ok++;
+
+            sw_gpd_addr = (NPS_ADDR_T)ptr_sw_gpd->ptr_cookie;
+
+            /* Give the address of the pre-saved SW GPD back to userspace */
+            osal_io_copyToUser(&ptr_cookie->done_sw_gpd_addr,
+                               &sw_gpd_addr,
+                               sizeof(NPS_ADDR_T));
+
+            /* free kernel sw_gpd */
+            _hal_tau_pkt_freeTxGpdList(unit, ptr_sw_gpd);
+        }
+        else
+        {
+            ptr_tx_cb->cnt.deque_fail++;
+        }
+    }
+    else
+    {
+        /* It may happen at the last gpd; return error and do not invoke the callback. */
+        rc = NPS_E_OTHERS;
+    }
+
+    return (rc);
+}
+
+/* FUNCTION NAME: _hal_tau_pkt_rxCheckReason
+ * PURPOSE:
+ *      To check whether the RX reason of the packet hits the given profile.
+ * INPUT:
+ *      ptr_rx_gpd      -- Pointer of the RX GPD
+ *      ptr_profile     -- Pointer of the netif profile
+ * OUTPUT:
+ *      ptr_hit_prof    -- Pointer of the hit flag
+ * RETURN:
+ *      None
+ * NOTES:
+ *      Reference to pkt_srv.
+ */
+static void
+_hal_tau_pkt_rxCheckReason(
+    volatile HAL_TAU_PKT_RX_GPD_T   *ptr_rx_gpd,
+    HAL_TAU_PKT_NETIF_PROFILE_T     *ptr_profile,
+    BOOL_T                          *ptr_hit_prof)
+{
+    HAL_TAU_PKT_RX_REASON_BITMAP_T  *ptr_reason_bitmap = &ptr_profile->reason_bitmap;
+    UI32_T                          bitval = 0;
+    UI32_T                          bitmap = 0x0;
+
+    if (0 == (ptr_profile->flags & HAL_TAU_PKT_NETIF_PROFILE_FLAGS_REASON))
+    {
+        /* The reason does not matter for this profile. */
+        *ptr_hit_prof = TRUE;
+        return;
+    }
+
+#define HAL_TAU_PKT_DI_NON_L3_CPU_MIN   (HAL_TAU_EXCPT_CPU_BASE_ID + HAL_TAU_EXCPT_CPU_NON_L3_MIN)
+#define HAL_TAU_PKT_DI_NON_L3_CPU_MAX   (HAL_TAU_EXCPT_CPU_BASE_ID + HAL_TAU_EXCPT_CPU_NON_L3_MAX)
+#define HAL_TAU_PKT_DI_L3_CPU_MIN       (HAL_TAU_EXCPT_CPU_BASE_ID + HAL_TAU_EXCPT_CPU_L3_MIN)
+#define HAL_TAU_PKT_DI_L3_CPU_MAX       (HAL_TAU_EXCPT_CPU_BASE_ID + HAL_TAU_EXCPT_CPU_L3_MAX)
+
+    switch (ptr_rx_gpd->itmh_eth.typ)
+    {
+        case HAL_TAU_PKT_TMH_TYPE_ITMH_ETH:
+
+            /* IPP non-L3 exception */
+            if (ptr_rx_gpd->itmh_eth.dst_idx >= HAL_TAU_PKT_DI_NON_L3_CPU_MIN &&
+                ptr_rx_gpd->itmh_eth.dst_idx <= HAL_TAU_PKT_DI_NON_L3_CPU_MAX)
+            {
+                bitval = ptr_rx_gpd->itmh_eth.dst_idx - HAL_TAU_PKT_DI_NON_L3_CPU_MIN;
+                bitmap = 1 << (bitval % 32);
+                if (0 != (ptr_reason_bitmap->ipp_excpt_bitmap[bitval / 32] & bitmap))
+                {
+                    *ptr_hit_prof = TRUE;
+                    break;
+                }
+            }
+
+            /* IPP L3 exception */
+            if (ptr_rx_gpd->itmh_eth.dst_idx >= HAL_TAU_PKT_DI_L3_CPU_MIN &&
+                ptr_rx_gpd->itmh_eth.dst_idx <= HAL_TAU_PKT_DI_L3_CPU_MAX)
+            {
+                bitmap = ptr_rx_gpd->itmh_eth.dst_idx - HAL_TAU_PKT_DI_L3_CPU_MIN;
+                if (0 != (ptr_reason_bitmap->ipp_l3_excpt_bitmap[0] & bitmap))
+                {
+                    *ptr_hit_prof = TRUE;
+                    break;
+                }
+            }
+
+            /* IPP cp_to_cpu_bmap */
+            bitmap = ptr_rx_gpd->itmh_eth.cp_to_cpu_bmap;
+            if (0 != (ptr_reason_bitmap->ipp_copy2cpu_bitmap[0] & bitmap))
+            {
+                *ptr_hit_prof = TRUE;
+                break;
+            }
+
+            /* IPP cp_to_cpu_rsn */
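+            /* The reason code selects a word (bitval / 32) and a bit
+             * position (1 << (bitval % 32)) within the profile reason
+             * bitmap array.
+             */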
+            bitval = ptr_rx_gpd->itmh_eth.cp_to_cpu_code;
+            bitmap = 1 << (bitval % 32);
+            if (0 != (ptr_reason_bitmap->ipp_rsn_bitmap[bitval / 32] & bitmap))
+            {
+                *ptr_hit_prof = TRUE;
+                break;
+            }
+            break;
+
+        case HAL_TAU_PKT_TMH_TYPE_ITMH_FAB:
+        case HAL_TAU_PKT_TMH_TYPE_ETMH_FAB:
+            break;
+
+        case HAL_TAU_PKT_TMH_TYPE_ETMH_ETH:
+
+            /* EPP exception */
+            if (1 == ptr_rx_gpd->etmh_eth.redir)
+            {
+                bitval = ptr_rx_gpd->etmh_eth.excpt_code_mir_bmap;
+                bitmap = 1 << (bitval % 32);
+                if (0 != (ptr_reason_bitmap->epp_excpt_bitmap[bitval / 32] & bitmap))
+                {
+                    *ptr_hit_prof = TRUE;
+                    break;
+                }
+            }
+
+            /* EPP cp_to_cpu_bmap */
+            bitmap = ((ptr_rx_gpd->etmh_eth.cp_to_cpu_bmap_w0 << 7) |
+                      (ptr_rx_gpd->etmh_eth.cp_to_cpu_bmap_w1));
+            if (0 != (ptr_reason_bitmap->epp_copy2cpu_bitmap[0] & bitmap))
+            {
+                *ptr_hit_prof = TRUE;
+                break;
+            }
+            break;
+
+        default:
+            *ptr_hit_prof = FALSE;
+            break;
+    }
+}
+
+static BOOL_T
+_hal_tau_pkt_comparePatternWithPayload(
+    volatile HAL_TAU_PKT_RX_GPD_T   *ptr_rx_gpd,
+    const UI8_T                     *ptr_pattern,
+    const UI8_T                     *ptr_mask,
+    const UI32_T                    offset)
+{
+    NPS_ADDR_T                      phy_addr = 0;
+    UI8_T                           *ptr_virt_addr = NULL;
+    UI32_T                          idx;
+
+    /* Get the packet payload */
+    phy_addr = NPS_ADDR_32_TO_64(ptr_rx_gpd->data_buf_addr_hi, ptr_rx_gpd->data_buf_addr_lo);
+    ptr_virt_addr = (UI8_T *) osal_dma_convertPhyToVirt(phy_addr);
+
+    for (idx = 0; idx < HAL_TAU_PKT_PROFILE_PATTERN_LEN; idx++)
+    {
+        if ((ptr_virt_addr[offset + idx] & ptr_mask[idx]) !=
+            (ptr_pattern[idx] & ptr_mask[idx]))
+        {
+            return (FALSE);
+        }
+    }
+
+    return (TRUE);
+}
+
+static void
+_hal_tau_pkt_rxCheckPattern(
+    volatile HAL_TAU_PKT_RX_GPD_T   *ptr_rx_gpd,
+    HAL_TAU_PKT_NETIF_PROFILE_T     *ptr_profile,
+    BOOL_T                          *ptr_hit_prof)
+{
+    BOOL_T      match = FALSE;
+    UI32_T      idx;
+
+    if ((ptr_profile->flags & (HAL_TAU_PKT_NETIF_PROFILE_FLAGS_PATTERN_0 |
+                               HAL_TAU_PKT_NETIF_PROFILE_FLAGS_PATTERN_1 |
+                               HAL_TAU_PKT_NETIF_PROFILE_FLAGS_PATTERN_2 |
+                               HAL_TAU_PKT_NETIF_PROFILE_FLAGS_PATTERN_3)) != 0)
+    {
+        /* Need to compare the payload with at least one of the four patterns */
+        /* Pre-assume that the result is positive */
+        *ptr_hit_prof = TRUE;
+
+        /* If any of the following comparisons fails, the result will be changed to negative */
+    }
+    else
+    {
+        return;
+    }
+
+    for (idx = 0; idx < HAL_TAU_PKT_PROFILE_PATTERN_NUM; idx++)
+    {
+        if (0 != (ptr_profile->flags & (HAL_TAU_PKT_NETIF_PROFILE_FLAGS_PATTERN_0 << idx)))
+        {
+            match = _hal_tau_pkt_comparePatternWithPayload(ptr_rx_gpd,
+                                                           ptr_profile->pattern[idx],
+                                                           ptr_profile->mask[idx],
+                                                           ptr_profile->offset[idx]);
+            if (TRUE == match)
+            {
+                /* Do nothing */
+            }
+            else
+            {
+                /* Change the result to negative */
+                *ptr_hit_prof = FALSE;
+                break;
+            }
+        }
+    }
+}
+
+static void
+_hal_tau_pkt_matchUserProfile(
+    volatile HAL_TAU_PKT_RX_GPD_T   *ptr_rx_gpd,
+    HAL_TAU_PKT_PROFILE_NODE_T      *ptr_profile_list,
+    BOOL_T                          *ptr_hit_prof)
+{
+    HAL_TAU_PKT_PROFILE_NODE_T      *ptr_curr_node = ptr_profile_list;
+
+    while (NULL != ptr_curr_node)
+    {
+        /* 1st match reason */
+        _hal_tau_pkt_rxCheckReason(ptr_rx_gpd, ptr_curr_node->ptr_profile, ptr_hit_prof);
+        if (TRUE == *ptr_hit_prof)
+        {
+            HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_PROFILE,
+                            "rx prof matched by reason\n");
+
+            /* Then, check pattern */
+            _hal_tau_pkt_rxCheckPattern(ptr_rx_gpd, ptr_curr_node->ptr_profile, ptr_hit_prof);
+            if (TRUE == *ptr_hit_prof)
+            {
+                HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_PROFILE,
+                                "rx prof matched by pattern\n");
+                break;
+            }
+        }
+
+        /* Search the next profile (lower priority) */
+        ptr_curr_node = ptr_curr_node->ptr_next_node;
+    }
+}
+
+static void
+_hal_tau_pkt_getPacketDest(
+    volatile HAL_TAU_PKT_RX_GPD_T   *ptr_rx_gpd,
+    HAL_TAU_PKT_DEST_T              *ptr_dest)
+{
+    BOOL_T                          hit_prof = FALSE;
+    UI32_T                          port;
+    HAL_TAU_PKT_PROFILE_NODE_T      *ptr_profile_list;
+
+    port = ptr_rx_gpd->itmh_eth.igr_phy_port;
+    ptr_profile_list = HAL_TAU_PKT_GET_PORT_PROFILE_LIST(port);
+
+    _hal_tau_pkt_matchUserProfile(ptr_rx_gpd, ptr_profile_list, &hit_prof);
+    if (TRUE == hit_prof)
+    {
+        *ptr_dest = HAL_TAU_PKT_DEST_SDK;
+    }
+    else
+    {
+        *ptr_dest = HAL_TAU_PKT_DEST_NETDEV;
+    }
+} + +/* FUNCTION NAME: _hal_tau_pkt_rxEnQueue + * PURPOSE: + * To enqueue the packets to multiple queues. + * INPUT: + * unit -- The unit ID + * channel -- The target channel + * ptr_sw_gpd -- Pointer for the SW Rx GPD link list + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully enqueue the packets. + * NOTES: + * None + */ +static void +_hal_tau_pkt_rxEnQueue( + const UI32_T unit, + const UI32_T channel, + HAL_TAU_PKT_RX_SW_GPD_T *ptr_sw_gpd) +{ + HAL_TAU_PKT_RX_CB_T *ptr_rx_cb = HAL_TAU_PKT_GET_RX_CB_PTR(unit); + HAL_TAU_PKT_RX_SW_GPD_T *ptr_sw_first_gpd = ptr_sw_gpd; + void *ptr_virt_addr = NULL; + NPS_ADDR_T phy_addr = 0; + HAL_TAU_PKT_DEST_T pkt_dest; + + /* skb meta */ + UI32_T port = 0, len = 0, total_len = 0; + struct net_device *ptr_net_dev = NULL; + struct net_device_priv *ptr_priv = NULL; + struct sk_buff *ptr_skb = NULL, *ptr_merge_skb = NULL; + UI32_T copy_offset; + +#if defined(PERF_EN_TEST) + /* To verify kernel Rx performance */ + if (NPS_E_OK == perf_rxTest()) + { + while (NULL != ptr_sw_gpd) + { + len += (HAL_TAU_PKT_CH_LAST_GPD == ptr_sw_gpd->rx_gpd.ch)? + ptr_sw_gpd->rx_gpd.cnsm_buf_len : ptr_sw_gpd->rx_gpd.avbl_buf_len; + + total_len += len; + + /* unmap dma */ + phy_addr = NPS_ADDR_32_TO_64(ptr_sw_gpd->rx_gpd.data_buf_addr_hi, ptr_sw_gpd->rx_gpd.data_buf_addr_lo); + osal_skb_unmapDma(phy_addr, len, DMA_FROM_DEVICE); + /* next */ + ptr_sw_gpd = ptr_sw_gpd->ptr_next; + } + perf_rxCallback(total_len); + _hal_tau_pkt_freeRxGpdList(unit, ptr_sw_first_gpd, TRUE); + return ; + } +#endif + + _hal_tau_pkt_getPacketDest(&ptr_sw_gpd->rx_gpd, &pkt_dest); + if (HAL_TAU_PKT_DEST_NETDEV == pkt_dest) + { + ptr_sw_gpd = ptr_sw_first_gpd; + while (NULL != ptr_sw_gpd) + { + len = (HAL_TAU_PKT_CH_LAST_GPD == ptr_sw_gpd->rx_gpd.ch)? + ptr_sw_gpd->rx_gpd.cnsm_buf_len : ptr_sw_gpd->rx_gpd.avbl_buf_len; + + total_len += len; + + /* unmap dma */ + phy_addr = NPS_ADDR_32_TO_64(ptr_sw_gpd->rx_gpd.data_buf_addr_hi, ptr_sw_gpd->rx_gpd.data_buf_addr_lo); + ptr_virt_addr = ptr_sw_gpd->ptr_cookie; + + ptr_skb = (struct sk_buff *)ptr_virt_addr; + + /* note here ptr_skb->len is the total buffer size not means the actual Rx packet len + * it should be updated later + */ + osal_skb_unmapDma(phy_addr, ptr_skb->len, DMA_FROM_DEVICE); + + /* reset ptr_skb->len with real packet len instead of total buffer size */ + if (NULL == ptr_sw_gpd->ptr_next) + { + /* strip CRC padded by asic for the last gpd segment */ + ptr_skb->len = len - ETH_FCS_LEN; + } + else + { + ptr_skb->len = len; + } + + skb_set_tail_pointer(ptr_skb, ptr_skb->len); + + /* next */ + ptr_sw_gpd = ptr_sw_gpd->ptr_next; + } + + /* if the packet is composed of multiple gpd (skb), need to merge it into a single skb */ + if (NULL != ptr_sw_first_gpd->ptr_next) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_RX, + "u=%u, rxch=%u, rcv pkt size=%u > gpd buf size=%u\n", + unit, channel, total_len, ptr_rx_cb->buf_len); + ptr_merge_skb = osal_skb_alloc(total_len - ETH_FCS_LEN); + if (NULL != ptr_merge_skb) + { + copy_offset = 0; + ptr_sw_gpd = ptr_sw_first_gpd; + while (NULL != ptr_sw_gpd) + { + ptr_skb = (struct sk_buff *)ptr_sw_gpd->ptr_cookie; + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_RX, + "u=%u, rxch=%u, copy size=%u to buf offset=%u\n", + unit, channel, ptr_skb->len, copy_offset); + + memcpy(&(((UI8_T *)ptr_merge_skb->data)[copy_offset]), + ptr_skb->data, ptr_skb->len); + copy_offset += ptr_skb->len; + ptr_sw_gpd = ptr_sw_gpd->ptr_next; + } + /* put the merged skb to ptr_skb for the following process */ + ptr_skb = ptr_merge_skb; + } + else + { + 
HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_RX), + "u=%u, rxch=%u, alloc skb failed, size=%u\n", + unit, channel, (total_len - ETH_FCS_LEN)); + } + + /* free both sw_gpd and the skb attached on it */ + _hal_tau_pkt_freeRxGpdList(unit, ptr_sw_first_gpd, TRUE); + } + else + { + /* free only sw_gpd */ + _hal_tau_pkt_freeRxGpdList(unit, ptr_sw_first_gpd, FALSE); + } + + /* get port and net_device */ + port = ptr_sw_first_gpd->rx_gpd.itmh_eth.igr_phy_port; + ptr_net_dev = HAL_TAU_PKT_GET_PORT_NETDEV(port); + + /* if NULL netdev, drop the skb */ + if (NULL == ptr_net_dev) + { + ptr_rx_cb->cnt.channel[channel].netdev_miss++; + osal_skb_free(ptr_skb); + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_RX), + "u=%u, rxch=%u, find netdev failed\n", + unit, channel); + return; + } + + /* skb handling */ + ptr_skb->dev = ptr_net_dev; + ptr_skb->pkt_type = PACKET_HOST; /* this packet is for me */ + ptr_skb->protocol = eth_type_trans(ptr_skb, ptr_net_dev); /* skip ethernet header */ + ptr_skb->ip_summed = CHECKSUM_UNNECESSARY; /* skip checksum */ + + /* send to linux */ + osal_skb_recv(ptr_skb); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) + ptr_net_dev->last_rx = jiffies; +#endif + ptr_priv = netdev_priv(ptr_net_dev); + ptr_priv->stats.rx_packets++; + ptr_priv->stats.rx_bytes += total_len; + } + else if (HAL_TAU_PKT_DEST_SDK == pkt_dest) + { + while (0 != _hal_tau_pkt_enQueue(&ptr_rx_cb->sw_queue[channel], ptr_sw_gpd)) + { + ptr_rx_cb->cnt.channel[channel].enque_retry++; + HAL_TAU_PKT_RX_ENQUE_RETRY_SLEEP(); + } + ptr_rx_cb->cnt.channel[channel].enque_ok++; + + osal_triggerEvent(&ptr_rx_cb->sync_sema); + ptr_rx_cb->cnt.channel[channel].trig_event++; + } + else if (HAL_TAU_PKT_DEST_DROP == pkt_dest) + { + _hal_tau_pkt_freeRxGpdList(unit, ptr_sw_first_gpd, TRUE); + } + else + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_RX), + "u=%u, rxch=%u, invalid pkt dest=%d\n", + unit, channel, pkt_dest); + } +} + +/* FUNCTION NAME: _hal_tau_pkt_schedRxDeQueue + * PURPOSE: + * To dequeue the packets based on the configured algorithm. + * INPUT: + * unit -- The unit ID + * ptr_cookie -- Pointer of the RX cookie + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully dequeue the packets. 
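+ * EXAMPLE:
+ *      The queue scan is a rotating round-robin.  With four queues and
+ *      deque_idx == 2, this call inspects the queues in the order
+ *      2, 3, 0, 1 and then advances deque_idx just past the first
+ *      non-empty queue it finds:
+ *
+ *          queue = ((ptr_rx_cb->deque_idx + idx) % HAL_TAU_PKT_RX_QUEUE_NUM);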
+ * NOTES:
+ *      None
+ */
+static NPS_ERROR_NO_T
+_hal_tau_pkt_schedRxDeQueue(
+    const UI32_T                    unit,
+    HAL_TAU_PKT_IOCTL_RX_COOKIE_T   *ptr_cookie)
+{
+    HAL_TAU_PKT_IOCTL_RX_COOKIE_T   ioctl_data;
+    HAL_TAU_PKT_IOCTL_RX_GPD_T      ioctl_gpd;
+    HAL_TAU_PKT_RX_CB_T             *ptr_rx_cb = HAL_TAU_PKT_GET_RX_CB_PTR(unit);
+    HAL_TAU_PKT_RX_SW_GPD_T         *ptr_sw_gpd_knl = NULL;
+    HAL_TAU_PKT_RX_SW_GPD_T         *ptr_sw_first_gpd_knl = NULL;
+    UI32_T                          que_cnt = 0;
+    UI32_T                          queue = 0;
+    UI32_T                          idx = 0;
+    UI32_T                          gpd_idx = 0;
+    /* copy Rx sw_gpd */
+    volatile HAL_TAU_PKT_RX_GPD_T   *ptr_rx_gpd = NULL;
+    void                            *ptr_virt_addr = NULL;
+    NPS_ADDR_T                      phy_addr = 0;
+    UI32_T                          buf_len = 0;
+    NPS_ERROR_NO_T                  rc = NPS_E_OK;
+
+    /* get queue and count */
+    for (idx = 0; idx < HAL_TAU_PKT_RX_QUEUE_NUM; idx++)
+    {
+        /* to guarantee that each queue gets an opportunity to be handled */
+        queue = ((ptr_rx_cb->deque_idx + idx) % HAL_TAU_PKT_RX_QUEUE_NUM);
+        _hal_tau_pkt_getQueueCount(&ptr_rx_cb->sw_queue[queue], &que_cnt);
+        if (que_cnt > 0)
+        {
+            ptr_rx_cb->deque_idx = ((queue + 1) % HAL_TAU_PKT_RX_QUEUE_NUM);
+            break;
+        }
+    }
+
+    /* If all of the queues are empty, wait for the rxTask event */
+    if (0 == que_cnt)
+    {
+        osal_waitEvent(&ptr_rx_cb->sync_sema);
+        if (FALSE == ptr_rx_cb->running)
+        {
+            return (NPS_E_OTHERS); /* deinit */
+        }
+
+        ptr_rx_cb->cnt.wait_event++;
+
+        /* re-get queue and count */
+        for (queue = 0; queue < HAL_TAU_PKT_RX_QUEUE_NUM; queue++)
+        {
+            _hal_tau_pkt_getQueueCount(&ptr_rx_cb->sw_queue[queue], &que_cnt);
+            if (que_cnt > 0)
+            {
+                ptr_rx_cb->deque_idx = ((queue + 1) % HAL_TAU_PKT_RX_QUEUE_NUM);
+                break;
+            }
+        }
+    }
+
+    /* deque */
+    if ((que_cnt > 0) && (queue < HAL_TAU_PKT_RX_QUEUE_NUM))
+    {
+        rc = _hal_tau_pkt_deQueue(&ptr_rx_cb->sw_queue[queue], (void **)&ptr_sw_gpd_knl);
+        if (NPS_E_OK == rc)
+        {
+            ptr_rx_cb->cnt.channel[queue].deque_ok++;
+            ptr_sw_first_gpd_knl = ptr_sw_gpd_knl;
+
+            osal_io_copyFromUser(&ioctl_data, ptr_cookie, sizeof(HAL_TAU_PKT_IOCTL_RX_COOKIE_T));
+
+            while (NULL != ptr_sw_gpd_knl)
+            {
+                /* get the IOCTL GPD from user */
+                osal_io_copyFromUser(&ioctl_gpd,
+                                     ((void *)((NPS_HUGE_T)ioctl_data.ioctl_gpd_addr))
+                                         + gpd_idx*sizeof(HAL_TAU_PKT_IOCTL_RX_GPD_T),
+                                     sizeof(HAL_TAU_PKT_IOCTL_RX_GPD_T));
+
+                /* get knl buf addr */
+                ptr_rx_gpd = &ptr_sw_gpd_knl->rx_gpd;
+                phy_addr = NPS_ADDR_32_TO_64(ptr_rx_gpd->data_buf_addr_hi, ptr_rx_gpd->data_buf_addr_lo);
+
+                ptr_virt_addr = ptr_sw_gpd_knl->ptr_cookie;
+                osal_skb_unmapDma(phy_addr, ((struct sk_buff *)ptr_virt_addr)->len, DMA_FROM_DEVICE);
+
+                buf_len = (HAL_TAU_PKT_CH_LAST_GPD == ptr_rx_gpd->ch)?
+                          ptr_rx_gpd->cnsm_buf_len : ptr_rx_gpd->avbl_buf_len;
+
+                /* overwrite the whole rx_gpd to user
+                 * the user should re-assign the correct value to data_buf_addr_hi, data_buf_addr_lo
+                 * after this IOCTL returns
+                 */
+                osal_io_copyToUser((void *)((NPS_HUGE_T)ioctl_gpd.hw_gpd_addr),
+                                   &ptr_sw_gpd_knl->rx_gpd,
+                                   sizeof(HAL_TAU_PKT_RX_GPD_T));
+                /* copy buf */
+                /* the DMA buf address allocated by the user is stored in ptr_ioctl_data->gpd[idx].cookie */
+                osal_io_copyToUser((void *)((NPS_HUGE_T)ioctl_gpd.dma_buf_addr),
+                                   ((struct sk_buff *)ptr_virt_addr)->data, buf_len);
+                ptr_sw_gpd_knl->ptr_cookie = ptr_virt_addr;
+
+                /* next */
+                ptr_sw_gpd_knl = ptr_sw_gpd_knl->ptr_next;
+                gpd_idx++;
+            }
+
+            /* Must free kernel sw_gpd */
+            _hal_tau_pkt_freeRxGpdList(unit, ptr_sw_first_gpd_knl, TRUE);
+        }
+        else
+        {
+            ptr_rx_cb->cnt.channel[queue].deque_fail++;
+        }
+    }
+    else
+    {
+        /* It may happen at the last gpd; return error and do not invoke the callback.
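+         * (e.g., the wakeup came from the de-init path, or another caller
+         * drained the queue between the event and the re-check)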
+         */
+        rc = NPS_E_OTHERS;
+    }
+
+    return (rc);
+}
+
+/* FUNCTION NAME: _hal_tau_pkt_waitTxDone
+ * PURPOSE:
+ *      To determine the next action after transferring the packet to HW.
+ * INPUT:
+ *      unit        -- The unit ID
+ *      channel     -- The target TX channel
+ *      ptr_sw_gpd  -- Pointer for the SW Tx GPD linked list
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK    -- Successfully perform the target action.
+ * NOTES:
+ *      None
+ */
+static NPS_ERROR_NO_T
+_hal_tau_pkt_waitTxDone(
+    const UI32_T                    unit,
+    const HAL_TAU_PKT_TX_CHANNEL_T  channel,
+    HAL_TAU_PKT_TX_SW_GPD_T         *ptr_sw_gpd)
+{
+    NPS_ERROR_NO_T                  rc = NPS_E_OK;
+    HAL_TAU_PKT_TX_CB_T             *ptr_tx_cb = HAL_TAU_PKT_GET_TX_CB_PTR(unit);
+    HAL_TAU_PKT_TX_PDMA_T           *ptr_tx_pdma = HAL_TAU_PKT_GET_TX_PDMA_PTR(unit, channel);
+    volatile HAL_TAU_PKT_TX_GPD_T   *ptr_tx_gpd = NULL;
+    UI32_T                          last_gpd_idx = 0;
+    UI32_T                          loop_cnt = 0;
+
+    if (HAL_TAU_PKT_TX_WAIT_ASYNC == ptr_tx_cb->wait_mode)
+    {
+        ;
+    }
+    else if (HAL_TAU_PKT_TX_WAIT_SYNC_INTR == ptr_tx_cb->wait_mode)
+    {
+        osal_takeSemaphore(&ptr_tx_pdma->sync_intr_sema, HAL_TAU_PKT_PDMA_TX_INTR_TIMEOUT);
+        /* rc = _hal_tau_pkt_invokeTxGpdCallback(unit, ptr_sw_gpd); */
+    }
+    else if (HAL_TAU_PKT_TX_WAIT_SYNC_POLL == ptr_tx_cb->wait_mode)
+    {
+        last_gpd_idx = ptr_tx_pdma->free_idx + ptr_tx_pdma->used_gpd_num;
+        last_gpd_idx %= ptr_tx_pdma->gpd_num;
+        ptr_tx_gpd = HAL_TAU_PKT_GET_TX_GPD_PTR(unit, channel, last_gpd_idx);
+
+        while (HAL_TAU_PKT_HWO_HW_OWN == ptr_tx_gpd->hwo)
+        {
+            osal_dma_invalidateCache((void *)ptr_tx_gpd, sizeof(HAL_TAU_PKT_TX_GPD_T));
+            loop_cnt++;
+            if (0 == loop_cnt % HAL_TAU_PKT_PDMA_TX_POLL_MAX_LOOP)
+            {
+                ptr_tx_cb->cnt.channel[channel].poll_timeout++;
+                rc = NPS_E_OTHERS;
+                break;
+            }
+        }
+        if (HAL_TAU_PKT_ECC_ERROR_OCCUR == ptr_tx_gpd->ecce)
+        {
+            ptr_tx_cb->cnt.channel[channel].ecc_err++;
+        }
+        if (NPS_E_OK == rc)
+        {
+            ptr_tx_pdma->free_gpd_num += ptr_tx_pdma->used_gpd_num;
+            ptr_tx_pdma->used_gpd_num = 0;
+            ptr_tx_pdma->free_idx = ptr_tx_pdma->used_idx;
+            /* rc = _hal_tau_pkt_invokeTxGpdCallback(unit, ptr_sw_gpd); */
+        }
+    }
+
+    return (rc);
+}
+
+static NPS_ERROR_NO_T
+_hal_tau_pkt_resumeAllIntf(
+    const UI32_T unit)
+{
+    struct net_device   *ptr_net_dev = NULL;
+    UI32_T              port;
+
+    /* Wake the stopped TX queue of each net device */
+    for (port = 0; port < HAL_TAU_PKT_MAX_PORT_NUM; port++)
+    {
+        ptr_net_dev = HAL_TAU_PKT_GET_PORT_NETDEV(port);
+        if (NULL != ptr_net_dev)
+        {
+            if (netif_queue_stopped(ptr_net_dev))
+            {
+                netif_wake_queue(ptr_net_dev);
+            }
+        }
+    }
+
+    return (NPS_E_OK);
+}
+
+static NPS_ERROR_NO_T
+_hal_tau_pkt_suspendAllIntf(
+    const UI32_T unit)
+{
+    struct net_device   *ptr_net_dev = NULL;
+    UI32_T              port;
+
+    /* Stop the TX queue of each net device */
+    for (port = 0; port < HAL_TAU_PKT_MAX_PORT_NUM; port++)
+    {
+        ptr_net_dev = HAL_TAU_PKT_GET_PORT_NETDEV(port);
+        if (NULL != ptr_net_dev)
+        {
+            netif_stop_queue(ptr_net_dev);
+        }
+    }
+
+    return (NPS_E_OK);
+}
+
+/* FUNCTION NAME: hal_tau_pkt_sendGpd
+ * PURPOSE:
+ *      To perform the packet transmission from the CPU to the switch.
+ * INPUT:
+ *      unit        -- The unit ID
+ *      channel     -- The target TX channel
+ *      ptr_sw_gpd  -- Pointer for the SW Tx GPD linked list
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK    -- Successfully perform the transferring.
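+ * EXAMPLE:
+ *      Minimal single-GPD transmission (a sketch only: buffer setup, DMA
+ *      mapping and error handling are omitted, and myTxDoneCallback is a
+ *      hypothetical completion handler, not defined in this file):
+ *
+ *          HAL_TAU_PKT_TX_SW_GPD_T sw_gpd;
+ *
+ *          osal_memset(&sw_gpd, 0x0, sizeof(HAL_TAU_PKT_TX_SW_GPD_T));
+ *          sw_gpd.gpd_num  = 1;
+ *          sw_gpd.ptr_next = NULL;
+ *          sw_gpd.callback = myTxDoneCallback;
+ *          -- fill sw_gpd.tx_gpd: buffer address/length, ch = LAST_GPD
+ *          rc = hal_tau_pkt_sendGpd(unit, HAL_TAU_PKT_TX_CHANNEL_0, &sw_gpd);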
+ * NOTES: + * None + */ +NPS_ERROR_NO_T +hal_tau_pkt_sendGpd( + const UI32_T unit, + const HAL_TAU_PKT_TX_CHANNEL_T channel, + HAL_TAU_PKT_TX_SW_GPD_T *ptr_sw_gpd) +{ + NPS_ERROR_NO_T rc = NPS_E_OK; + HAL_TAU_PKT_TX_CB_T *ptr_tx_cb = HAL_TAU_PKT_GET_TX_CB_PTR(unit); + HAL_TAU_PKT_TX_PDMA_T *ptr_tx_pdma = HAL_TAU_PKT_GET_TX_PDMA_PTR(unit, channel); + volatile HAL_TAU_PKT_TX_GPD_T *ptr_tx_gpd = NULL; + HAL_TAU_PKT_TX_SW_GPD_T *ptr_sw_first_gpd = ptr_sw_gpd; + + UI32_T used_idx = 0; + UI32_T used_gpd_num = ptr_sw_gpd->gpd_num; + NPS_IRQ_FLAGS_T irq_flags; + + osal_takeIsrLock(&ptr_tx_pdma->ring_lock, &irq_flags); + + /* If not PDMA error */ + if (FALSE == ptr_tx_pdma->err_flag) + { + /* Make Sure GPD is enough */ + if (ptr_tx_pdma->free_gpd_num >= used_gpd_num) + { + used_idx = ptr_tx_pdma->used_idx; + while (NULL != ptr_sw_gpd) + { + ptr_tx_gpd = HAL_TAU_PKT_GET_TX_GPD_PTR(unit, channel, used_idx); + osal_dma_invalidateCache((void *)ptr_tx_gpd, sizeof(HAL_TAU_PKT_TX_GPD_T)); + + if (HAL_TAU_PKT_HWO_HW_OWN == ptr_tx_gpd->hwo) + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_TX), + "u=%u, txch=%u, free gpd idx out-of-sync\n", + unit, channel); + rc = NPS_E_TABLE_FULL; + break; + } + + /* Fill in HW-GPD Ring */ + osal_memcpy((void *)ptr_tx_gpd, &ptr_sw_gpd->tx_gpd, sizeof(HAL_TAU_PKT_TX_GPD_T)); + osal_dma_flushCache((void *)ptr_tx_gpd, sizeof(HAL_TAU_PKT_TX_GPD_T)); + + /* next */ + used_idx++; + used_idx %= ptr_tx_pdma->gpd_num; + ptr_sw_gpd = ptr_sw_gpd->ptr_next; + } + + if (HAL_TAU_PKT_TX_WAIT_ASYNC == ptr_tx_cb->wait_mode) + { + /* Fill 1st GPD in SW-GPD Ring */ + ptr_tx_pdma->pptr_sw_gpd_ring[ptr_tx_pdma->used_idx] = ptr_sw_first_gpd; + } + + /* update Tx PDMA */ + ptr_tx_pdma->used_idx = used_idx; + ptr_tx_pdma->used_gpd_num += used_gpd_num; + ptr_tx_pdma->free_gpd_num -= used_gpd_num; + + _hal_tau_pkt_resumeTxChannelReg(unit, channel, used_gpd_num); + ptr_tx_cb->cnt.channel[channel].send_ok++; + + _hal_tau_pkt_waitTxDone(unit, channel, ptr_sw_first_gpd); + + /* reserve 1 packet buffer for each port in case that the suspension is too late */ +#define HAL_TAU_PKT_KNL_TX_RING_AVBL_GPD_LOW (HAL_TAU_PORT_NUM) + if (ptr_tx_pdma->free_gpd_num < HAL_TAU_PKT_KNL_TX_RING_AVBL_GPD_LOW) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_TX, + "u=%u, txch=%u, tx avbl gpd < %d, suspend all netdev\n", + unit, channel, HAL_TAU_PKT_KNL_TX_RING_AVBL_GPD_LOW); + _hal_tau_pkt_suspendAllIntf(unit); + } + } + else + { + rc = NPS_E_TABLE_FULL; + } + } + else + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_TX), + "u=%u, txch=%u, pdma hw err\n", + unit, channel); + rc = NPS_E_OTHERS; + } + + osal_giveIsrLock(&ptr_tx_pdma->ring_lock, &irq_flags); + + return (rc); +} + +/* ----------------------------------------------------------------------------------- pkt_srv */ +/* ----------------------------------------------------------------------------------- Rx Init */ +static NPS_ERROR_NO_T +_hal_tau_pkt_rxStop( + const UI32_T unit) +{ + NPS_ERROR_NO_T rc = NPS_E_OK; + HAL_TAU_PKT_RX_CHANNEL_T channel = 0; + HAL_TAU_PKT_RX_CB_T *ptr_rx_cb = HAL_TAU_PKT_GET_RX_CB_PTR(unit); + HAL_TAU_PKT_DRV_CB_T *ptr_cb = HAL_TAU_PKT_GET_DRV_CB_PTR(unit); + HAL_TAU_PKT_RX_PDMA_T *ptr_rx_pdma = NULL; + + /* Check if Rx is already stopped*/ + if (0 == (ptr_cb->init_flag & HAL_TAU_PKT_INIT_RX_START)) + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_RX | HAL_TAU_PKT_DBG_ERR), + "u=%u, rx stop failed, not started\n", unit); + return (NPS_E_OK); + } + + /* Check if PKT Drv/Task were de-init before stopping Rx */ + /* Currently, we help 
to stop Rx when deinit Drv/Task, so it shouldn't enter below logic */ + if ((0 == (ptr_cb->init_flag & HAL_TAU_PKT_INIT_TASK)) || + (0 == (ptr_cb->init_flag & HAL_TAU_PKT_INIT_DRV))) + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_RX | HAL_TAU_PKT_DBG_ERR), + "u=%u, rx stop failed, pkt task & pkt drv not init\n", unit); + return (NPS_E_OK); + } + + /* Deinit Rx PDMA and free buf for Rx GPD */ + for (channel = 0; channel < HAL_TAU_PKT_RX_CHANNEL_LAST; channel++) + { + ptr_rx_pdma = HAL_TAU_PKT_GET_RX_PDMA_PTR(unit, channel); + + osal_takeSemaphore(&ptr_rx_pdma->sema, NPS_SEMAPHORE_WAIT_FOREVER); + _hal_tau_pkt_stopRxChannelReg(unit, channel); + rc = _hal_tau_pkt_deinitRxPdmaRingBuf(unit, channel); + osal_giveSemaphore(&ptr_rx_pdma->sema); + } + + /* Return user thread */ + ptr_rx_cb->running = FALSE; + ptr_cb->init_flag &= (~HAL_TAU_PKT_INIT_RX_START); + + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_RX, + "u=%u, rx stop done, init flag=0x%x\n", unit, ptr_cb->init_flag); + + osal_triggerEvent(&ptr_rx_cb->sync_sema); + + return (rc); +} + +static NPS_ERROR_NO_T +_hal_tau_pkt_rxStart( + const UI32_T unit) +{ + NPS_ERROR_NO_T rc = NPS_E_OK; + HAL_TAU_PKT_RX_CHANNEL_T channel = 0; + HAL_TAU_PKT_RX_CB_T *ptr_rx_cb = HAL_TAU_PKT_GET_RX_CB_PTR(unit); + HAL_TAU_PKT_DRV_CB_T *ptr_cb = HAL_TAU_PKT_GET_DRV_CB_PTR(unit); + HAL_TAU_PKT_RX_PDMA_T *ptr_rx_pdma = NULL; + + if (0 != (ptr_cb->init_flag & HAL_TAU_PKT_INIT_RX_START)) + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_RX | HAL_TAU_PKT_DBG_ERR), + "u=%u, rx start failed, already started\n", unit); + return (NPS_E_OK); + } + + /* init Rx PDMA and alloc buf for Rx GPD */ + for (channel = 0; channel < HAL_TAU_PKT_RX_CHANNEL_LAST; channel++) + { + ptr_rx_pdma = HAL_TAU_PKT_GET_RX_PDMA_PTR(unit, channel); + + osal_takeSemaphore(&ptr_rx_pdma->sema, NPS_SEMAPHORE_WAIT_FOREVER); + rc = _hal_tau_pkt_initRxPdmaRingBuf(unit, channel); + if (NPS_E_OK == rc) + { + ptr_rx_pdma->cur_idx = 0; + _hal_tau_pkt_startRxChannelReg(unit, channel, ptr_rx_pdma->gpd_num); + } + osal_giveSemaphore(&ptr_rx_pdma->sema); + } + + /* enable to dequeue rx packets */ + ptr_rx_cb->running = TRUE; + + /* set the flag to record init state */ + ptr_cb->init_flag |= HAL_TAU_PKT_INIT_RX_START; + + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_RX, + "u=%u, rx start done, init flag=0x%x\n", unit, ptr_cb->init_flag); + return (rc); +} + +/* FUNCTION NAME: hal_tau_pkt_setRxKnlConfig + * PURPOSE: + * 1. To stop the Rx channel and deinit the Rx subsystem. + * 2. To init the Rx subsystem and start the Rx channel. + * 3. To restart the Rx subsystem + * INPUT: + * unit -- The unit ID + * ptr_cookie -- Pointer of the RX cookie + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully configure the RX parameters. + * NPS_E_OTHERS -- Configure the parameter failed. 
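+ * EXAMPLE:
+ *      Sketch of the user-space side (the ioctl request name is
+ *      illustrative, not defined in this file):
+ *
+ *          HAL_TAU_PKT_IOCTL_RX_COOKIE_T   cookie;
+ *
+ *          cookie.rx_type = HAL_TAU_PKT_IOCTL_RX_TYPE_INIT;
+ *          cookie.buf_len = 2048;   -- per-GPD packet buffer size in bytes
+ *          ioctl(fd, NPS_IOCTL_RX_CONFIG, &cookie);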
+ * NOTES:
+ *
+ */
+NPS_ERROR_NO_T
+hal_tau_pkt_setRxKnlConfig(
+    const UI32_T                    unit,
+    HAL_TAU_PKT_IOCTL_RX_COOKIE_T   *ptr_cookie)
+{
+    NPS_ERROR_NO_T              rc = NPS_E_OK;
+    HAL_TAU_PKT_RX_CB_T         *ptr_rx_cb = HAL_TAU_PKT_GET_RX_CB_PTR(unit);
+    HAL_TAU_PKT_DRV_CB_T        *ptr_cb = HAL_TAU_PKT_GET_DRV_CB_PTR(unit);
+    HAL_TAU_PKT_IOCTL_RX_TYPE_T rx_type = HAL_TAU_PKT_IOCTL_RX_TYPE_LAST;
+
+    osal_io_copyFromUser(&rx_type, &ptr_cookie->rx_type, sizeof(HAL_TAU_PKT_IOCTL_RX_TYPE_T));
+
+    if (HAL_TAU_PKT_IOCTL_RX_TYPE_DEINIT == rx_type)
+    {
+        _hal_tau_pkt_rxStop(unit);
+    }
+    if (HAL_TAU_PKT_IOCTL_RX_TYPE_INIT == rx_type)
+    {
+        /* To prevent the buffer size from being changed on-the-fly */
+        if (0 != (ptr_cb->init_flag & HAL_TAU_PKT_INIT_RX_START))
+        {
+            HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_RX | HAL_TAU_PKT_DBG_ERR),
+                            "u=%u, rx init failed, already started\n", unit);
+            return (NPS_E_OK);
+        }
+
+        osal_io_copyFromUser(&ptr_rx_cb->buf_len, &ptr_cookie->buf_len, sizeof(UI32_T));
+        _hal_tau_pkt_rxStart(unit);
+    }
+
+    return (rc);
+}
+
+/* FUNCTION NAME: hal_tau_pkt_getRxKnlConfig
+ * PURPOSE:
+ *      To get the Rx subsystem configuration.
+ * INPUT:
+ *      unit        -- The unit ID
+ *      ptr_cookie  -- Pointer of the RX cookie
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK        -- Successfully get the RX parameters.
+ *      NPS_E_OTHERS    -- Get the parameters failed.
+ * NOTES:
+ *
+ */
+NPS_ERROR_NO_T
+hal_tau_pkt_getRxKnlConfig(
+    const UI32_T                    unit,
+    HAL_TAU_PKT_IOCTL_RX_COOKIE_T   *ptr_cookie)
+{
+    HAL_TAU_PKT_RX_CB_T     *ptr_rx_cb = HAL_TAU_PKT_GET_RX_CB_PTR(unit);
+
+    osal_io_copyToUser(&ptr_cookie->buf_len, &ptr_rx_cb->buf_len, sizeof(UI32_T));
+
+    return (NPS_E_OK);
+}
+
+/* ----------------------------------------------------------------------------------- Deinit */
+/* FUNCTION NAME: hal_tau_pkt_deinitTask
+ * PURPOSE:
+ *      To de-initialize the Task for the packet module.
+ * INPUT:
+ *      unit        -- The unit ID
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK        -- Successfully de-initialize the task.
+ *      NPS_E_OTHERS    -- De-initialize the task failed.
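+ * EXAMPLE:
+ *      The teardown below runs in a fixed order (an illustration of the
+ *      flow, not additional behavior):
+ *
+ *          _hal_tau_pkt_rxStop(unit);                -- only if RX is still started
+ *          osal_triggerEvent(&ptr_rx_cb->sync_sema); -- release the blocked RX ioctl
+ *          -- stop/destroy the per-channel RX-done and TX-done tasks
+ *          -- stop/destroy the error task, then clear HAL_TAU_PKT_INIT_TASK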
+ * NOTES:
+ *      None
+ */
+NPS_ERROR_NO_T
+hal_tau_pkt_deinitTask(
+    const UI32_T unit)
+{
+    HAL_TAU_PKT_DRV_CB_T    *ptr_cb = HAL_TAU_PKT_GET_DRV_CB_PTR(unit);
+    HAL_TAU_PKT_TX_CB_T     *ptr_tx_cb = HAL_TAU_PKT_GET_TX_CB_PTR(unit);
+    HAL_TAU_PKT_RX_CB_T     *ptr_rx_cb = HAL_TAU_PKT_GET_RX_CB_PTR(unit);
+    UI32_T                  channel = 0;
+
+    if (0 == (ptr_cb->init_flag & HAL_TAU_PKT_INIT_TASK))
+    {
+        HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_RX | HAL_TAU_PKT_DBG_ERR),
+                        "u=%u, pkt task deinit failed, not inited\n", unit);
+        return (NPS_E_OK);
+    }
+
+    /* Need to stop Rx before de-init Task */
+    if (0 != (ptr_cb->init_flag & HAL_TAU_PKT_INIT_RX_START))
+    {
+        HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_RX | HAL_TAU_PKT_DBG_ERR),
+                        "u=%u, pkt task deinit failed, rx not stopped\n", unit);
+
+        _hal_tau_pkt_rxStop(unit);
+    }
+
+    /* Make the blocked Rx IOCTL from userspace return */
+    osal_triggerEvent(&ptr_rx_cb->sync_sema);
+
+    /* Destroy txTask */
+    if (HAL_TAU_PKT_TX_WAIT_ASYNC == ptr_tx_cb->wait_mode)
+    {
+        ptr_tx_cb->running = FALSE;
+        osal_triggerEvent(&ptr_tx_cb->sync_sema);
+    }
+
+    /* Destroy handleRxDoneTask */
+    for (channel = 0; channel < HAL_TAU_PKT_RX_CHANNEL_LAST; channel++)
+    {
+        osal_stopThread(&ptr_rx_cb->isr_task_id[channel]);
+        osal_triggerEvent(HAL_TAU_PKT_RCH_EVENT(unit, channel));
+        osal_destroyThread(&ptr_rx_cb->isr_task_id[channel]);
+    }
+
+    /* Destroy handleTxDoneTask */
+    for (channel = 0; channel < HAL_TAU_PKT_TX_CHANNEL_LAST; channel++)
+    {
+        osal_stopThread(&ptr_tx_cb->isr_task_id[channel]);
+        osal_triggerEvent(HAL_TAU_PKT_TCH_EVENT(unit, channel));
+        osal_destroyThread(&ptr_tx_cb->isr_task_id[channel]);
+    }
+
+    /* Destroy handleErrorTask */
+    osal_stopThread(&ptr_cb->err_task_id);
+    osal_triggerEvent(HAL_TAU_PKT_ERR_EVENT(unit));
+    osal_destroyThread(&ptr_cb->err_task_id);
+
+    /* Set the flag to record init state */
+    ptr_cb->init_flag &= (~HAL_TAU_PKT_INIT_TASK);
+
+    HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_RX,
+                    "u=%u, pkt task deinit done, init flag=0x%x\n",
+                    unit, ptr_cb->init_flag);
+
+    return (NPS_E_OK);
+}
+
+/* FUNCTION NAME: _hal_tau_pkt_deinitTxPdma
+ * PURPOSE:
+ *      To de-initialize the Tx PDMA configuration of the specified channel.
+ * INPUT:
+ *      unit        -- The unit ID
+ *      channel     -- The target Tx channel
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK        -- Successfully de-init the Tx PDMA.
+ *      NPS_E_OTHERS    -- De-init the Tx PDMA failed.
+ * NOTES:
+ *      None
+ */
+static NPS_ERROR_NO_T
+_hal_tau_pkt_deinitTxPdma(
+    const UI32_T                    unit,
+    const HAL_TAU_PKT_TX_CHANNEL_T  channel)
+{
+    HAL_TAU_PKT_TX_CB_T     *ptr_tx_cb = HAL_TAU_PKT_GET_TX_CB_PTR(unit);
+    HAL_TAU_PKT_TX_PDMA_T   *ptr_tx_pdma = HAL_TAU_PKT_GET_TX_PDMA_PTR(unit, channel);
+    NPS_IRQ_FLAGS_T         irg_flags;
+
+    _hal_tau_pkt_stopTxChannelReg(unit, channel);
+
+    /* Free DMA and flush queue */
+    osal_takeIsrLock(&ptr_tx_pdma->ring_lock, &irg_flags);
+
+    osal_dma_free(ptr_tx_pdma->ptr_gpd_start_addr);
+
+    if (HAL_TAU_PKT_TX_WAIT_ASYNC == ptr_tx_cb->wait_mode)
+    {
+        osal_free(ptr_tx_pdma->pptr_sw_gpd_ring);
+        osal_free(ptr_tx_pdma->pptr_sw_gpd_bulk);
+    }
+    else if (HAL_TAU_PKT_TX_WAIT_SYNC_INTR == ptr_tx_cb->wait_mode)
+    {
+        osal_destroySemaphore(&ptr_tx_pdma->sync_intr_sema);
+    }
+
+    osal_giveIsrLock(&ptr_tx_pdma->ring_lock, &irg_flags);
+
+    osal_destroyIsrLock(&ptr_tx_pdma->ring_lock);
+
+    return (NPS_E_OK);
+}
+
+/* FUNCTION NAME: _hal_tau_pkt_deinitRxPdma
+ * PURPOSE:
+ *      To de-initialize the Rx PDMA configuration of the specified channel.
+ * INPUT: + * unit -- The unit ID + * channel -- The target Rx channel + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully de-init the Rx PDMA. + * NOTES: + * None + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_deinitRxPdma( + const UI32_T unit, + const HAL_TAU_PKT_RX_CHANNEL_T channel) +{ + HAL_TAU_PKT_RX_PDMA_T *ptr_rx_pdma = HAL_TAU_PKT_GET_RX_PDMA_PTR(unit, channel); + + /* Free DMA */ + osal_takeSemaphore(&ptr_rx_pdma->sema, NPS_SEMAPHORE_WAIT_FOREVER); + osal_dma_free(ptr_rx_pdma->ptr_gpd_start_addr); + osal_giveSemaphore(&ptr_rx_pdma->sema); + osal_destroySemaphore(&ptr_rx_pdma->sema); + + return (NPS_E_OK); +} + +/* FUNCTION NAME: _hal_tau_pkt_deinitPktCb + * PURPOSE: + * To de-init the control block of Drv. + * INPUT: + * unit -- The unit ID + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully de-init the control block. + * NOTES: + * None + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_deinitPktCb( + const UI32_T unit) +{ + HAL_TAU_PKT_DRV_CB_T *ptr_cb = HAL_TAU_PKT_GET_DRV_CB_PTR(unit); + UI32_T idx = 0, vec = sizeof(_hal_tau_pkt_intr_vec) / sizeof(HAL_TAU_PKT_INTR_VEC_T); + + for (idx = 0; idx < vec; idx++) + { + osal_destroyEvent(&_hal_tau_pkt_intr_vec[idx].intr_event); + ptr_cb->intr_bitmap &= ~(_hal_tau_pkt_intr_vec[idx].intr_reg); + } + + /* Unregister PKT interrupt functions */ + osal_mdc_registerIsr(unit, NULL, NULL); + osal_destroyIsrLock(&ptr_cb->intr_lock); + + return (NPS_E_OK); +} + +/* FUNCTION NAME: _hal_tau_pkt_deinitPktTxCb + * PURPOSE: + * To de-init the control block of Tx PDMA. + * INPUT: + * unit -- The unit ID + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully de-init the control block. + * NOTES: + * None + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_deinitPktTxCb( + const UI32_T unit) +{ + NPS_ERROR_NO_T rc = NPS_E_OK; + HAL_TAU_PKT_TX_CB_T *ptr_tx_cb = HAL_TAU_PKT_GET_TX_CB_PTR(unit); + HAL_TAU_PKT_TX_CHANNEL_T channel = 0; + + /* Deinitialize TX PDMA sub-system.*/ + for (channel = 0; channel < HAL_TAU_PKT_TX_CHANNEL_LAST; channel++) + { + _hal_tau_pkt_deinitTxPdma(unit, channel); + } + + if (HAL_TAU_PKT_TX_WAIT_ASYNC == ptr_tx_cb->wait_mode) + { + /* Destroy the sync semaphore of txTask */ + osal_destroyEvent(&ptr_tx_cb->sync_sema); + + /* Deinitialize Tx GPD-queue (of first SW-GPD) from handleTxDoneTask to txTask */ + osal_destroySemaphore(&ptr_tx_cb->sw_queue.sema); + osal_que_destroy(&ptr_tx_cb->sw_queue.que_id); + } + + return (rc); +} + +/* FUNCTION NAME: _hal_tau_pkt_deinitPktRxCb + * PURPOSE: + * To de-init the control block of Rx PDMA. + * INPUT: + * unit -- The unit ID + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully de-init the control block. 
+ * NOTES:
+ *      None
+ */
+static NPS_ERROR_NO_T
+_hal_tau_pkt_deinitPktRxCb(
+    const UI32_T unit)
+{
+    NPS_ERROR_NO_T              rc = NPS_E_OK;
+    HAL_TAU_PKT_RX_CB_T         *ptr_rx_cb = HAL_TAU_PKT_GET_RX_CB_PTR(unit);
+    HAL_TAU_PKT_RX_CHANNEL_T    channel = 0;
+    UI32_T                      queue = 0;
+
+    /* Deinitialize RX PDMA sub-system */
+    for (channel = 0; channel < HAL_TAU_PKT_RX_CHANNEL_LAST; channel++)
+    {
+        _hal_tau_pkt_deinitRxPdma(unit, channel);
+    }
+
+    /* Destroy the sync semaphore of rxTask */
+    osal_destroyEvent(&ptr_rx_cb->sync_sema);
+
+    /* Deinitialize Rx GPD-queue (of first SW-GPD) from handleRxDoneTask to rxTask */
+    for (queue = 0; queue < HAL_TAU_PKT_RX_QUEUE_NUM; queue++)
+    {
+        osal_destroySemaphore(&ptr_rx_cb->sw_queue[queue].sema);
+        osal_que_destroy(&ptr_rx_cb->sw_queue[queue].que_id);
+    }
+
+    return (rc);
+}
+
+/* FUNCTION NAME: _hal_tau_pkt_deinitL1Isr
+ * PURPOSE:
+ *      To de-initialize the PDMA L1 ISR configuration.
+ * INPUT:
+ *      unit        -- The unit ID
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK    -- Successfully de-initialize for the L1 ISR.
+ * NOTES:
+ *      None
+ */
+static NPS_ERROR_NO_T
+_hal_tau_pkt_deinitL1Isr(
+    const UI32_T unit)
+{
+    UI32_T idx = 0, vec = sizeof(_hal_tau_pkt_intr_vec) / sizeof(HAL_TAU_PKT_INTR_VEC_T);
+
+    for (idx = 0; idx < vec; idx++)
+    {
+        _hal_tau_pkt_maskIntr(unit, _hal_tau_pkt_intr_vec[idx].intr_reg);
+        _hal_tau_pkt_disableIntr(unit, _hal_tau_pkt_intr_vec[idx].intr_reg);
+    }
+
+    return (NPS_E_OK);
+}
+
+/* FUNCTION NAME: _hal_tau_pkt_deinitL2Isr
+ * PURPOSE:
+ *      To de-initialize the PDMA L2 ISR configuration.
+ * INPUT:
+ *      unit        -- The unit ID
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK        -- Successfully de-initialize for the L2 ISR.
+ *      NPS_E_OTHERS    -- De-initialize failed.
+ * NOTES:
+ *      None
+ */
+static NPS_ERROR_NO_T
+_hal_tau_pkt_deinitL2Isr(
+    const UI32_T unit)
+{
+    HAL_TAU_PKT_L2_ISR_T isr_status = 0x0;
+
+    HAL_TAU_PKT_SET_BITMAP(isr_status, HAL_TAU_PKT_L2_ISR_RCH0);
+    HAL_TAU_PKT_SET_BITMAP(isr_status, HAL_TAU_PKT_L2_ISR_RCH1);
+    HAL_TAU_PKT_SET_BITMAP(isr_status, HAL_TAU_PKT_L2_ISR_RCH2);
+    HAL_TAU_PKT_SET_BITMAP(isr_status, HAL_TAU_PKT_L2_ISR_RCH3);
+    HAL_TAU_PKT_SET_BITMAP(isr_status, HAL_TAU_PKT_L2_ISR_TCH0);
+    HAL_TAU_PKT_SET_BITMAP(isr_status, HAL_TAU_PKT_L2_ISR_TCH1);
+    HAL_TAU_PKT_SET_BITMAP(isr_status, HAL_TAU_PKT_L2_ISR_TCH2);
+    HAL_TAU_PKT_SET_BITMAP(isr_status, HAL_TAU_PKT_L2_ISR_TCH3);
+    HAL_TAU_PKT_SET_BITMAP(isr_status, HAL_TAU_PKT_L2_ISR_RX_QID_MAP_ERR);
+    HAL_TAU_PKT_SET_BITMAP(isr_status, HAL_TAU_PKT_L2_ISR_RX_FRAME_ERR);
+
+    osal_mdc_writePciReg(unit,
+                         HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_ERR_INT_MASK_SET),
+                         &isr_status, sizeof(UI32_T));
+
+    isr_status = 0x0;
+    osal_mdc_writePciReg(unit,
+                         HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_ERR_INT_EN),
+                         &isr_status, sizeof(UI32_T));
+
+    return (NPS_E_OK);
+}
+
+/* FUNCTION NAME: hal_tau_pkt_deinitPktDrv
+ * PURPOSE:
+ *      To invoke the functions to de-initialize the control block for each
+ *      PDMA subsystem.
+ * INPUT:
+ *      unit        -- The unit ID
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK        -- Successfully de-initialize the control blocks.
+ *      NPS_E_OTHERS    -- De-initialize the control blocks failed.
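+ * EXAMPLE:
+ *      The control blocks are released in the reverse order of their
+ *      initialization, and each step runs only if the previous one
+ *      returned NPS_E_OK:
+ *
+ *          L2 ISR -> L1 ISR -> RX control block -> TX control block -> PKT control block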
+ * NOTES:
+ *      None
+ */
+NPS_ERROR_NO_T
+hal_tau_pkt_deinitPktDrv(
+    const UI32_T unit)
+{
+    HAL_TAU_PKT_DRV_CB_T    *ptr_cb = HAL_TAU_PKT_GET_DRV_CB_PTR(unit);
+    NPS_ERROR_NO_T          rc = NPS_E_OK;
+
+    if (0 == (ptr_cb->init_flag & HAL_TAU_PKT_INIT_DRV))
+    {
+        HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_RX | HAL_TAU_PKT_DBG_ERR),
+                        "u=%u, pkt drv deinit failed, not inited\n", unit);
+        return (NPS_E_OK);
+    }
+
+    rc = _hal_tau_pkt_deinitL2Isr(unit);
+
+    if (NPS_E_OK == rc)
+    {
+        rc = _hal_tau_pkt_deinitL1Isr(unit);
+    }
+    if (NPS_E_OK == rc)
+    {
+        rc = _hal_tau_pkt_deinitPktRxCb(unit);
+    }
+    if (NPS_E_OK == rc)
+    {
+        rc = _hal_tau_pkt_deinitPktTxCb(unit);
+    }
+    if (NPS_E_OK == rc)
+    {
+        rc = _hal_tau_pkt_deinitPktCb(unit);
+    }
+
+    ptr_cb->init_flag &= (~HAL_TAU_PKT_INIT_DRV);
+
+    HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_COMMON,
+                    "u=%u, pkt drv deinit done, init flag=0x%x\n",
+                    unit, ptr_cb->init_flag);
+    return (rc);
+}
+
+/* ----------------------------------------------------------------------------------- Init */
+/* FUNCTION NAME: _hal_tau_pkt_handleTxErrStat
+ * PURPOSE:
+ *      To handle the error which occurs in TX channels.
+ * INPUT:
+ *      unit        -- The unit ID
+ *      channel     -- The target channel
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK    -- Successfully handle the interrupt.
+ * NOTES:
+ *
+ */
+static NPS_ERROR_NO_T
+_hal_tau_pkt_handleTxErrStat(
+    const UI32_T                    unit,
+    const HAL_TAU_PKT_TX_CHANNEL_T  channel)
+{
+    HAL_TAU_PKT_TX_CB_T     *ptr_tx_cb = HAL_TAU_PKT_GET_TX_CB_PTR(unit);
+    HAL_TAU_PKT_TX_PDMA_T   *ptr_tx_pdma = HAL_TAU_PKT_GET_TX_PDMA_PTR(unit, channel);
+    NPS_IRQ_FLAGS_T         irg_flags;
+
+    if (HAL_TAU_PKT_TX_WAIT_SYNC_INTR == ptr_tx_cb->wait_mode)
+    {
+        /* Notify the TX process to make it release the channel semaphore. */
+        osal_giveSemaphore(&ptr_tx_pdma->sync_intr_sema);
+    }
+
+    /* Set the error flag. */
+    osal_takeIsrLock(&ptr_tx_pdma->ring_lock, &irg_flags);
+    ptr_tx_pdma->err_flag = TRUE;
+    osal_giveIsrLock(&ptr_tx_pdma->ring_lock, &irg_flags);
+
+    osal_triggerEvent(HAL_TAU_PKT_TCH_EVENT(unit, channel));
+
+    return (NPS_E_OK);
+}
+
+/* FUNCTION NAME: _hal_tau_pkt_handleRxErrStat
+ * PURPOSE:
+ *      To handle the error which occurs in RX channels.
+ * INPUT:
+ *      unit        -- The unit ID
+ *      channel     -- The channel where the error occurs
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK    -- Successfully handle the error situation.
+ * NOTES:
+ *
+ */
+static NPS_ERROR_NO_T
+_hal_tau_pkt_handleRxErrStat(
+    const UI32_T                    unit,
+    const HAL_TAU_PKT_RX_CHANNEL_T  channel)
+{
+    HAL_TAU_PKT_RX_PDMA_T   *ptr_rx_pdma = HAL_TAU_PKT_GET_RX_PDMA_PTR(unit, channel);
+
+    /* Set the error flag. */
+    osal_takeSemaphore(&ptr_rx_pdma->sema, NPS_SEMAPHORE_WAIT_FOREVER);
+    ptr_rx_pdma->err_flag = TRUE;
+    osal_giveSemaphore(&ptr_rx_pdma->sema);
+
+    osal_triggerEvent(HAL_TAU_PKT_RCH_EVENT(unit, channel));
+
+    return (NPS_E_OK);
+}
+
+/* FUNCTION NAME: _hal_tau_pkt_handleTxL2Isr
+ * PURPOSE:
+ *      To handle the TX L2 interrupt according to the ISR status.
+ * INPUT:
+ *      unit        -- The unit ID
+ *      channel     -- The channel where the interrupt occurs
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK    -- Successfully handle the L2 interrupt.
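+ * EXAMPLE:
+ *      Every status bit is serviced with the same pattern; for instance,
+ *      for the HWO error bit:
+ *
+ *          -- read HAL_TAU_PKT_PDMA_TCH_INT_STAT, then mask all L2 sources
+ *          _hal_tau_pkt_clearTxL2IsrStatusReg(unit, channel,
+ *              HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_HWO_ERROR);
+ *          ptr_tx_cb->cnt.channel[channel].gpd_hwo_err++;
+ *          _hal_tau_pkt_handleTxErrStat(unit, channel);
+ *          -- finally unmask the L2 sources if any bit was set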
+ * NOTES: + * + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_handleTxL2Isr( + const UI32_T unit, + const HAL_TAU_PKT_TX_CHANNEL_T channel) +{ + HAL_TAU_PKT_TX_CHANNEL_L2_ISR_T isr_status = 0x0; + HAL_TAU_PKT_TX_CB_T *ptr_tx_cb = HAL_TAU_PKT_GET_TX_CB_PTR(unit); + + osal_mdc_readPciReg(unit, + HAL_TAU_PKT_GET_PDMA_TCH_REG(HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_TCH_INT_STAT), channel), + &isr_status, sizeof(isr_status)); + + _hal_tau_pkt_maskAllTxL2IsrReg(unit, channel); + + if (0 != (isr_status & HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_HWO_ERROR)) + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_TX), + "u=%u, txch=%u, pdma gpd hwo err\n", unit, channel); + _hal_tau_pkt_clearTxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_HWO_ERROR); + ptr_tx_cb->cnt.channel[channel].gpd_hwo_err++; + _hal_tau_pkt_handleTxErrStat(unit, channel); + } + if (0 != (isr_status & HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_CHKSM_ERROR)) + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_TX), + "u=%u, txch=%u, pdma gpd chksum err\n", unit, channel); + _hal_tau_pkt_clearTxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_CHKSM_ERROR); + ptr_tx_cb->cnt.channel[channel].gpd_chksm_err++; + _hal_tau_pkt_handleTxErrStat(unit, channel); + } + if (0 != (isr_status & HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_NO_OVFL_ERROR)) + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_TX), + "u=%u, txch=%u, pdma gpd num overflow err\n", unit, channel); + _hal_tau_pkt_clearTxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_NO_OVFL_ERROR); + ptr_tx_cb->cnt.channel[channel].gpd_no_ovfl_err++; + _hal_tau_pkt_handleTxErrStat(unit, channel); + } + if (0 != (isr_status & HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_DMA_READ_ERROR)) + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_TX), + "u=%u, txch=%u, pdma gpd dma read err\n", unit, channel); + _hal_tau_pkt_clearTxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_DMA_READ_ERROR); + ptr_tx_cb->cnt.channel[channel].gpd_dma_read_err++; + _hal_tau_pkt_handleTxErrStat(unit, channel); + } + if (0 != (isr_status & HAL_TAU_PKT_TX_CHANNEL_L2_ISR_BUF_SIZE_ERROR)) + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_TX), + "u=%u, txch=%u, pdma buf size err\n", unit, channel); + _hal_tau_pkt_clearTxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_TX_CHANNEL_L2_ISR_BUF_SIZE_ERROR); + ptr_tx_cb->cnt.channel[channel].buf_size_err++; + _hal_tau_pkt_handleTxErrStat(unit, channel); + } + if (0 != (isr_status & HAL_TAU_PKT_TX_CHANNEL_L2_ISR_RUNT_ERROR)) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_TX, + "u=%u, txch=%u, pdma pkt runt\n", unit, channel); + _hal_tau_pkt_clearTxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_TX_CHANNEL_L2_ISR_RUNT_ERROR); + ptr_tx_cb->cnt.channel[channel].runt_err++; + } + if (0 != (isr_status & HAL_TAU_PKT_TX_CHANNEL_L2_ISR_OVSZ_ERROR)) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_TX, + "u=%u, txch=%u, pdma pkt over size\n", unit, channel);; + _hal_tau_pkt_clearTxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_TX_CHANNEL_L2_ISR_OVSZ_ERROR); + ptr_tx_cb->cnt.channel[channel].ovsz_err++; + } + if (0 != (isr_status & HAL_TAU_PKT_TX_CHANNEL_L2_ISR_LEN_MISMATCH_ERROR)) + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_TX), + "u=%u, txch=%u, pdma len mismatch err\n", unit, channel); + _hal_tau_pkt_clearTxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_TX_CHANNEL_L2_ISR_LEN_MISMATCH_ERROR); + ptr_tx_cb->cnt.channel[channel].len_mismatch_err++; + } + if (0 != (isr_status & HAL_TAU_PKT_TX_CHANNEL_L2_ISR_PKTPL_DMA_READ_ERROR)) + { + 
HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_TX), + "u=%u, txch=%u, pdma pkt buf dma read err\n", unit, channel); + _hal_tau_pkt_clearTxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_TX_CHANNEL_L2_ISR_PKTPL_DMA_READ_ERROR); + ptr_tx_cb->cnt.channel[channel].pktpl_dma_read_err++; + _hal_tau_pkt_handleTxErrStat(unit, channel); + } + if (0 != (isr_status & HAL_TAU_PKT_TX_CHANNEL_L2_ISR_COS_ERROR)) + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_TX), + "u=%u, txch=%u, pdma tx cos err\n", unit, channel); + _hal_tau_pkt_clearTxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_TX_CHANNEL_L2_ISR_COS_ERROR); + ptr_tx_cb->cnt.channel[channel].cos_err++; + } + if (0 != (isr_status & HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_GT255_ERROR)) + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_TX), + "u=%u, txch=%u, pdma gpd num > 255 err\n", unit, channel); + _hal_tau_pkt_clearTxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_GT255_ERROR); + ptr_tx_cb->cnt.channel[channel].gpd_gt255_err++; + _hal_tau_pkt_handleTxErrStat(unit, channel); + } + if (0 != (isr_status & HAL_TAU_PKT_TX_CHANNEL_L2_ISR_PFC)) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_TX, + "u=%u, txch=%u, pdma flow ctrl\n", unit, channel); + _hal_tau_pkt_clearTxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_TX_CHANNEL_L2_ISR_PFC); + ptr_tx_cb->cnt.channel[channel].pfc++; + } + if (0 != (isr_status & HAL_TAU_PKT_TX_CHANNEL_L2_ISR_CREDIT_UDFL_ERROR)) + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_TX), + "u=%u, txch=%u, pdma credit underflow err\n", unit, channel); + _hal_tau_pkt_clearTxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_TX_CHANNEL_L2_ISR_CREDIT_UDFL_ERROR); + ptr_tx_cb->cnt.channel[channel].credit_udfl_err++; + _hal_tau_pkt_handleTxErrStat(unit, channel); + } + if (0 != (isr_status & HAL_TAU_PKT_TX_CHANNEL_L2_ISR_DMA_WRITE_ERROR)) + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_TX), + "u=%u, txch=%u, pdma dma write err\n", unit, channel); + _hal_tau_pkt_clearTxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_TX_CHANNEL_L2_ISR_DMA_WRITE_ERROR); + ptr_tx_cb->cnt.channel[channel].dma_write_err++; + _hal_tau_pkt_handleTxErrStat(unit, channel); + } + if (0 != (isr_status & HAL_TAU_PKT_TX_CHANNEL_L2_ISR_STOP_CMD_CPLT)) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_TX, + "u=%u, txch=%u, pdma stop done\n", unit, channel); + _hal_tau_pkt_clearTxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_TX_CHANNEL_L2_ISR_STOP_CMD_CPLT); + ptr_tx_cb->cnt.channel[channel].sw_issue_stop++; + } + if (0 != isr_status) + { + _hal_tau_pkt_unmaskAllTxL2IsrReg(unit, channel); + } + + return (NPS_E_OK); +} + +/* FUNCTION NAME: _hal_tau_pkt_handleRxL2Isr + * PURPOSE: + * To handle the RX L2 interrupt according to the ISR status. + * INPUT: + * unit -- The unit ID + * channel -- The channel where the interrupt occurs + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully handle the L2 interrupt. 
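+ * EXAMPLE:
+ *      The status bits fall into two classes: drop/size events only bump
+ *      a per-channel counter, while integrity errors additionally set the
+ *      channel error flag and wake the recovery path, e.g.:
+ *
+ *          ptr_rx_cb->cnt.channel[channel].dma_read_err++;
+ *          _hal_tau_pkt_handleRxErrStat(unit, channel);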
+ * NOTES: + * + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_handleRxL2Isr( + const UI32_T unit, + const HAL_TAU_PKT_RX_CHANNEL_T channel) +{ + HAL_TAU_PKT_RX_CHANNEL_L2_ISR_T isr_status = 0x0; + HAL_TAU_PKT_RX_CB_T *ptr_rx_cb = HAL_TAU_PKT_GET_RX_CB_PTR(unit); + + osal_mdc_readPciReg(unit, + HAL_TAU_PKT_GET_PDMA_RCH_REG(HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_RCH_INT_STAT), channel), + &isr_status, sizeof(isr_status)); + + _hal_tau_pkt_maskAllRxL2IsrReg(unit, channel); + + if (0 != (isr_status & HAL_TAU_PKT_RX_CHANNEL_L2_ISR_AVAIL_GPD_LOW)) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_RX, + "u=%u, rxch=%u, pdma avbl gpd low\n", unit, channel); + _hal_tau_pkt_clearRxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_RX_CHANNEL_L2_ISR_AVAIL_GPD_LOW); + ptr_rx_cb->cnt.channel[channel].avbl_gpd_low++; + } + if (0 != (isr_status & HAL_TAU_PKT_RX_CHANNEL_L2_ISR_AVAIL_GPD_EMPTY)) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_RX, + "u=%u, rxch=%u, pdma avbl gpd empty\n", unit, channel); + _hal_tau_pkt_clearRxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_RX_CHANNEL_L2_ISR_AVAIL_GPD_EMPTY); + ptr_rx_cb->cnt.channel[channel].avbl_gpd_empty++; + } + if (0 != (isr_status & HAL_TAU_PKT_RX_CHANNEL_L2_ISR_AVAIL_GPD_ERROR)) + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_RX), + "u=%u, rxch=%u, pdma avbl gpd err\n", unit, channel); + _hal_tau_pkt_clearRxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_RX_CHANNEL_L2_ISR_AVAIL_GPD_ERROR); + ptr_rx_cb->cnt.channel[channel].avbl_gpd_err++; + _hal_tau_pkt_handleRxErrStat(unit, channel); + } + if (0 != (isr_status & HAL_TAU_PKT_RX_CHANNEL_L2_ISR_GPD_CHKSM_ERROR)) + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_RX), + "u=%u, rxch=%u, pdma gpd chksum err\n", unit, channel); + _hal_tau_pkt_clearRxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_RX_CHANNEL_L2_ISR_GPD_CHKSM_ERROR); + ptr_rx_cb->cnt.channel[channel].gpd_chksm_err++; + _hal_tau_pkt_handleRxErrStat(unit, channel); + } + if (0 != (isr_status & HAL_TAU_PKT_RX_CHANNEL_L2_ISR_DMA_READ_ERROR)) + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_RX), + "u=%u, rxch=%u, pdma dma read err\n", unit, channel); + _hal_tau_pkt_clearRxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_RX_CHANNEL_L2_ISR_DMA_READ_ERROR); + ptr_rx_cb->cnt.channel[channel].dma_read_err++; + _hal_tau_pkt_handleRxErrStat(unit, channel); + } + if (0 != (isr_status & HAL_TAU_PKT_RX_CHANNEL_L2_ISR_DMA_WRITE_ERROR)) + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_RX), + "u=%u, rxch=%u, pdma dma write err\n", unit, channel); + _hal_tau_pkt_clearRxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_RX_CHANNEL_L2_ISR_DMA_WRITE_ERROR); + ptr_rx_cb->cnt.channel[channel].dma_write_err++; + _hal_tau_pkt_handleRxErrStat(unit, channel); + } + if (0 != (isr_status & HAL_TAU_PKT_RX_CHANNEL_L2_ISR_STOP_CMD_CPLT)) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_RX, + "u=%u, rxch=%u, pdma stop done\n", unit, channel); + _hal_tau_pkt_clearRxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_RX_CHANNEL_L2_ISR_STOP_CMD_CPLT); + ptr_rx_cb->cnt.channel[channel].sw_issue_stop++; + } + if (0 != (isr_status & HAL_TAU_PKT_RX_CHANNEL_L2_ISR_GPD_GT255_ERROR)) + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_RX), + "u=%u, rxch=%u, pdma gpd num > 255 err\n", unit, channel); + _hal_tau_pkt_clearRxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_RX_CHANNEL_L2_ISR_GPD_GT255_ERROR); + ptr_rx_cb->cnt.channel[channel].gpd_gt255_err++; + _hal_tau_pkt_handleRxErrStat(unit, channel); + } + if (0 != (isr_status & HAL_TAU_PKT_RX_CHANNEL_L2_ISR_TOD_UNINIT)) + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | 
HAL_TAU_PKT_DBG_RX),
+                        "u=%u, rxch=%u, pdma tod uninit err\n", unit, channel);
+        _hal_tau_pkt_clearRxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_RX_CHANNEL_L2_ISR_TOD_UNINIT);
+        ptr_rx_cb->cnt.channel[channel].tod_uninit++;
+        _hal_tau_pkt_handleRxErrStat(unit, channel);
+    }
+    if (0 != (isr_status & HAL_TAU_PKT_RX_CHANNEL_L2_ISR_PKT_ERROR_DROP))
+    {
+        HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_RX),
+                        "u=%u, rxch=%u, pdma pkt err drop\n", unit, channel);
+        _hal_tau_pkt_clearRxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_RX_CHANNEL_L2_ISR_PKT_ERROR_DROP);
+        ptr_rx_cb->cnt.channel[channel].pkt_err_drop++;
+    }
+    if (0 != (isr_status & HAL_TAU_PKT_RX_CHANNEL_L2_ISR_UDSZ_DROP))
+    {
+        HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_RX,
+                        "u=%u, rxch=%u, pdma pkt under size\n", unit, channel);
+        _hal_tau_pkt_clearRxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_RX_CHANNEL_L2_ISR_UDSZ_DROP);
+        ptr_rx_cb->cnt.channel[channel].udsz_drop++;
+    }
+    if (0 != (isr_status & HAL_TAU_PKT_RX_CHANNEL_L2_ISR_OVSZ_DROP))
+    {
+        HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_RX,
+                        "u=%u, rxch=%u, pdma pkt over size\n", unit, channel);
+        _hal_tau_pkt_clearRxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_RX_CHANNEL_L2_ISR_OVSZ_DROP);
+        ptr_rx_cb->cnt.channel[channel].ovsz_drop++;
+    }
+    if (0 != (isr_status & HAL_TAU_PKT_RX_CHANNEL_L2_ISR_CMDQ_OVF_DROP))
+    {
+        HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_RX,
+                        "u=%u, rxch=%u, pdma cmdq overflow\n", unit, channel);
+        _hal_tau_pkt_clearRxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_RX_CHANNEL_L2_ISR_CMDQ_OVF_DROP);
+        ptr_rx_cb->cnt.channel[channel].cmdq_ovf_drop++;
+    }
+    if (0 != (isr_status & HAL_TAU_PKT_RX_CHANNEL_L2_ISR_FIFO_OVF_DROP))
+    {
+        HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_RX,
+                        "u=%u, rxch=%u, pdma fifo overflow\n", unit, channel);
+        _hal_tau_pkt_clearRxL2IsrStatusReg(unit, channel, HAL_TAU_PKT_RX_CHANNEL_L2_ISR_FIFO_OVF_DROP);
+        ptr_rx_cb->cnt.channel[channel].fifo_ovf_drop++;
+    }
+    if (0 != isr_status)
+    {
+        _hal_tau_pkt_unmaskAllRxL2IsrReg(unit, channel);
+    }
+
+    return (NPS_E_OK);
+}
+
+/* FUNCTION NAME: _hal_tau_pkt_handleErrorTask
+ * PURPOSE:
+ *      To invoke the corresponding handler for the L2 interrupts.
+ * INPUT: + * ptr_argv -- The unit ID + * OUTPUT: + * None + * RETURN: + * None + * NOTES: + * None + */ +static void +_hal_tau_pkt_handleErrorTask( + void *ptr_argv) +{ + UI32_T unit = (UI32_T)((NPS_HUGE_T)ptr_argv); + HAL_TAU_PKT_L2_ISR_T isr_status = 0x0; + + osal_initRunThread(); + do + { + /* receive Error-ISR */ + osal_waitEvent(HAL_TAU_PKT_ERR_EVENT(unit)); + if (NPS_E_OK != osal_isRunThread()) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_COMMON, + "u=%u, err task destroyed\n", unit); + break; /* deinit-thread */ + } + + osal_mdc_readPciReg(unit, + HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_ERR_INT_STAT), + &isr_status, sizeof(UI32_T)); + + if (0 != (HAL_TAU_PKT_L2_ISR_RCH0 & isr_status)) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_COMMON, "u=%u, rxch=0, rcv err isr, status=0x%x\n", + unit, isr_status); + _hal_tau_pkt_handleRxL2Isr(unit, HAL_TAU_PKT_RX_CHANNEL_0); + } + if (0 != (HAL_TAU_PKT_L2_ISR_RCH1 & isr_status)) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_COMMON, "u=%u, rxch=1, rcv err isr, status=0x%x\n", + unit, isr_status); + _hal_tau_pkt_handleRxL2Isr(unit, HAL_TAU_PKT_RX_CHANNEL_1); + } + if (0 != (HAL_TAU_PKT_L2_ISR_RCH2 & isr_status)) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_COMMON, "u=%u, rxch=2, rcv err isr, status=0x%x\n", + unit, isr_status); + _hal_tau_pkt_handleRxL2Isr(unit, HAL_TAU_PKT_RX_CHANNEL_2); + } + if (0 != (HAL_TAU_PKT_L2_ISR_RCH3 & isr_status)) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_COMMON, "u=%u, rxch=3, rcv err isr, status=0x%x\n", + unit, isr_status); + _hal_tau_pkt_handleRxL2Isr(unit, HAL_TAU_PKT_RX_CHANNEL_3); + } + if (0 != (HAL_TAU_PKT_L2_ISR_TCH0 & isr_status)) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_COMMON, "u=%u, txch=0, rcv err isr, status=0x%x\n", + unit, isr_status); + _hal_tau_pkt_handleTxL2Isr(unit, HAL_TAU_PKT_TX_CHANNEL_0); + } + if (0 != (HAL_TAU_PKT_L2_ISR_TCH1 & isr_status)) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_COMMON, "u=%u, txch=1, rcv err isr, status=0x%x\n", + unit, isr_status); + _hal_tau_pkt_handleTxL2Isr(unit, HAL_TAU_PKT_TX_CHANNEL_1); + } + if (0 != (HAL_TAU_PKT_L2_ISR_TCH2 & isr_status)) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_COMMON, "u=%u, txch=2, rcv err isr, status=0x%x\n", + unit, isr_status); + _hal_tau_pkt_handleTxL2Isr(unit, HAL_TAU_PKT_TX_CHANNEL_2); + } + if (0 != (HAL_TAU_PKT_L2_ISR_TCH3 & isr_status)) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_COMMON, "u=%u, txch=3, rcv err isr, status=0x%x\n", + unit, isr_status); + _hal_tau_pkt_handleTxL2Isr(unit, HAL_TAU_PKT_TX_CHANNEL_3); + } + if (0 != (HAL_TAU_PKT_L2_ISR_RX_QID_MAP_ERR & isr_status)) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_COMMON, "u=%u, rcv rx qid map err isr, status=0x%x\n", + unit, isr_status); + } + if (0 != (HAL_TAU_PKT_L2_ISR_RX_FRAME_ERR & isr_status)) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_COMMON, "u=%u, rcv rx frame err isr, status=0x%x\n", + unit, isr_status); + } + if (0 != isr_status) + { + osal_mdc_writePciReg(unit, + HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_ERR_INT_CLR), + &isr_status, sizeof(UI32_T)); + + _hal_tau_pkt_unmaskIntr(unit, HAL_TAU_PKT_ERR_REG(unit)); + } + + } while (NPS_E_OK == osal_isRunThread()); + osal_exitRunThread(); +} + +/* FUNCTION NAME: _hal_tau_pkt_handleTxDoneTask + * PURPOSE: + * To handle the TX done interrupt for the specified TX channel. 
+ * INPUT: + * ptr_argv -- The unit ID and channel ID + * OUTPUT: + * None + * RETURN: + * None + * NOTES: + * None + */ +static void +_hal_tau_pkt_handleTxDoneTask( + void *ptr_argv) +{ + /* cookie or index */ + UI32_T unit = ((HAL_TAU_PKT_ISR_COOKIE_T *)ptr_argv)->unit; + HAL_TAU_PKT_TX_CHANNEL_T channel = (HAL_TAU_PKT_TX_CHANNEL_T) + ((HAL_TAU_PKT_ISR_COOKIE_T *)ptr_argv)->channel; + /* control block */ + HAL_TAU_PKT_TX_CB_T *ptr_tx_cb = HAL_TAU_PKT_GET_TX_CB_PTR(unit); + HAL_TAU_PKT_TX_PDMA_T *ptr_tx_pdma = HAL_TAU_PKT_GET_TX_PDMA_PTR(unit, channel); + volatile HAL_TAU_PKT_TX_GPD_T *ptr_tx_gpd = NULL; + UI32_T first_gpd_idx = 0; /* To record the first GPD */ + UI32_T loop_cnt = 0; + NPS_IRQ_FLAGS_T irg_flags; + unsigned long timeout = 0; + UI32_T bulk_pkt_cnt = 0, idx; + + osal_initRunThread(); + do + { + /* receive Tx-Done-ISR */ + osal_waitEvent(HAL_TAU_PKT_TCH_EVENT(unit, channel)); + if (NPS_E_OK != osal_isRunThread()) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_TX, + "u=%u, txch=%u, tx done task destroyed\n", unit, channel); + break; /* deinit-thread */ + } + + /* protect Tx PDMA + * for sync-intr, the sema is locked by sendGpd + */ + if (HAL_TAU_PKT_TX_WAIT_SYNC_INTR != ptr_tx_cb->wait_mode) + { + osal_takeIsrLock(&ptr_tx_pdma->ring_lock, &irg_flags); + } + + loop_cnt = ptr_tx_pdma->used_gpd_num; + while (loop_cnt > 0) + { + ptr_tx_gpd = HAL_TAU_PKT_GET_TX_GPD_PTR(unit, channel, ptr_tx_pdma->free_idx); + osal_dma_invalidateCache((void *)ptr_tx_gpd, sizeof(HAL_TAU_PKT_TX_GPD_T)); + + /* If hwo=HW, it might be: + * 1. err_flag=TRUE -> HW breakdown -> enque and recover -> break + * 2. err_flag=FALSE -> HW busy -> break + */ + if (HAL_TAU_PKT_HWO_HW_OWN == ptr_tx_gpd->hwo) + { + if (TRUE == ptr_tx_pdma->err_flag) + { + /* flush the incomplete Tx packet */ + if (HAL_TAU_PKT_TX_WAIT_ASYNC == ptr_tx_cb->wait_mode) + { + for (idx = 0; idx < ptr_tx_pdma->gpd_num; idx++) + { + if (NULL != ptr_tx_pdma->pptr_sw_gpd_ring[idx]) + { + ptr_tx_pdma->pptr_sw_gpd_bulk[bulk_pkt_cnt] + = ptr_tx_pdma->pptr_sw_gpd_ring[idx]; + ptr_tx_pdma->pptr_sw_gpd_ring[idx] = NULL; + bulk_pkt_cnt++; + } + } + } + + /* do error recover */ + first_gpd_idx = 0; + if (NPS_E_OK == _hal_tau_pkt_recoverTxPdma(unit, channel)) + { + ptr_tx_pdma->err_flag = FALSE; + ptr_tx_cb->cnt.channel[channel].err_recover++; + } + else + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_TX | HAL_TAU_PKT_DBG_ERR), + "u=%u, txch=%u, err recover failed\n", + unit, channel); + } + } + else + { + } + break; + } + + if (HAL_TAU_PKT_TX_WAIT_ASYNC == ptr_tx_cb->wait_mode) + { + /* If hwo=SW and ch=0, record the head of sw gpd in bulk buf */ + if (HAL_TAU_PKT_CH_LAST_GPD == ptr_tx_gpd->ch) + { + ptr_tx_pdma->pptr_sw_gpd_bulk[bulk_pkt_cnt] + = ptr_tx_pdma->pptr_sw_gpd_ring[first_gpd_idx]; + + bulk_pkt_cnt++; + ptr_tx_pdma->pptr_sw_gpd_ring[first_gpd_idx] = NULL; + + /* next SW-GPD must be the head of another PKT->SW-GPD */ + first_gpd_idx = ptr_tx_pdma->free_idx + 1; + first_gpd_idx %= ptr_tx_pdma->gpd_num; + } + } + + if (HAL_TAU_PKT_ECC_ERROR_OCCUR == ptr_tx_gpd->ecce) + { + ptr_tx_cb->cnt.channel[channel].ecc_err++; + } + + /* update Tx PDMA */ + ptr_tx_pdma->free_idx++; + ptr_tx_pdma->free_idx %= ptr_tx_pdma->gpd_num; + ptr_tx_pdma->used_gpd_num--; + ptr_tx_pdma->free_gpd_num++; + loop_cnt--; + } + + /* let the netdev resume Tx */ + _hal_tau_pkt_resumeAllIntf(unit); + + /* update ISR and counter */ + ptr_tx_cb->cnt.channel[channel].tx_done++; + + _hal_tau_pkt_unmaskIntr(unit, HAL_TAU_PKT_TCH_REG(unit, channel)); + + if (HAL_TAU_PKT_TX_WAIT_SYNC_INTR != 
ptr_tx_cb->wait_mode) + { + osal_giveIsrLock(&ptr_tx_pdma->ring_lock, &irg_flags); + } + else + { + osal_giveSemaphore(&ptr_tx_pdma->sync_intr_sema); + } + + /* enque packet after releasing the spinlock */ + _hal_tau_pkt_txEnQueueBulk(unit, channel, bulk_pkt_cnt); + bulk_pkt_cnt = 0; + + /* prevent this task from executing too long */ + if (!(time_before(jiffies, timeout))) + { + schedule(); + timeout = jiffies + 1; /* continuously free tx descriptor for 1 tick */ + } + + } while (NPS_E_OK == osal_isRunThread()); + osal_exitRunThread(); +} + +/* FUNCTION NAME: _hal_tau_pkt_handleRxDoneTask + * PURPOSE: + * To handle the RX done interrupt for the specified RX channel. + * INPUT: + * ptr_argv -- The unit ID and channel ID + * OUTPUT: + * None + * RETURN: + * None + * NOTES: + * None + */ +static void +_hal_tau_pkt_handleRxDoneTask( + void *ptr_argv) +{ + /* cookie or index */ + UI32_T unit = ((HAL_TAU_PKT_ISR_COOKIE_T *)ptr_argv)->unit; + HAL_TAU_PKT_RX_CHANNEL_T channel = (HAL_TAU_PKT_RX_CHANNEL_T) + ((HAL_TAU_PKT_ISR_COOKIE_T *)ptr_argv)->channel; + + /* control block */ + HAL_TAU_PKT_RX_CB_T *ptr_rx_cb = HAL_TAU_PKT_GET_RX_CB_PTR(unit); + HAL_TAU_PKT_RX_PDMA_T *ptr_rx_pdma = HAL_TAU_PKT_GET_RX_PDMA_PTR(unit, channel); + volatile HAL_TAU_PKT_RX_GPD_T *ptr_rx_gpd = NULL; + + BOOL_T first = TRUE; + BOOL_T last = FALSE; + HAL_TAU_PKT_RX_SW_GPD_T *ptr_sw_gpd = NULL; + HAL_TAU_PKT_RX_SW_GPD_T *ptr_sw_first_gpd = NULL; + UI32_T loop_cnt = 0; + unsigned long timeout = 0; + + osal_initRunThread(); + do + { + /* receive Rx-Done-ISR */ + osal_waitEvent(HAL_TAU_PKT_RCH_EVENT(unit, channel)); + if (NPS_E_OK != osal_isRunThread()) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_RX, + "u=%u, rxch=%u, rx done task destroyed\n", unit, channel); + break; /* deinit-thread */ + } + + /* check if Rx-system is inited */ + if (0 == ptr_rx_cb->buf_len) + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_RX | HAL_TAU_PKT_DBG_ERR), + "u=%u, rxch=%u, rx gpd buf len=0\n", + unit, channel); + continue; + } + + /* protect Rx PDMA */ + osal_takeSemaphore(&ptr_rx_pdma->sema, NPS_SEMAPHORE_WAIT_FOREVER); + loop_cnt = ptr_rx_pdma->gpd_num; + while (loop_cnt > 0) + { + ptr_rx_gpd = HAL_TAU_PKT_GET_RX_GPD_PTR(unit, channel, ptr_rx_pdma->cur_idx); + osal_dma_invalidateCache((void *)ptr_rx_gpd, sizeof(HAL_TAU_PKT_RX_GPD_T)); + + /* If hwo=HW, it might be: + * 1. err_flag=TRUE -> HW breakdown -> enque and recover -> break + * 2. 
err_flag=FALSE -> HW busy -> break + */ + if (HAL_TAU_PKT_HWO_HW_OWN == ptr_rx_gpd->hwo) + { + if (TRUE == ptr_rx_pdma->err_flag) + { + /* free the last incomplete Rx packet */ + if ((NULL != ptr_sw_first_gpd) && + (NULL != ptr_sw_gpd)) + { + ptr_sw_gpd->ptr_next = NULL; + ptr_sw_first_gpd->rx_complete = FALSE; + _hal_tau_pkt_rxEnQueue(unit, channel, ptr_sw_first_gpd); + } + + /* do error recover */ + first = TRUE; + last = FALSE; + if (NPS_E_OK == _hal_tau_pkt_recoverRxPdma(unit, channel)) + { + ptr_rx_pdma->err_flag = FALSE; + ptr_rx_cb->cnt.channel[channel].err_recover++; + } + else + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_RX | HAL_TAU_PKT_DBG_ERR), + "u=%u, rxch=%u, err recover failed\n", + unit, channel); + } + } + else + { + } + break; + } + + /* Move HW-GPD to SW-GPD and append to a link-list */ + if (TRUE == first) + { + ptr_sw_first_gpd = (HAL_TAU_PKT_RX_SW_GPD_T *)osal_alloc(sizeof(HAL_TAU_PKT_RX_SW_GPD_T)); + ptr_sw_gpd = ptr_sw_first_gpd; + if (NULL != ptr_sw_gpd) + { + memcpy(&ptr_sw_gpd->rx_gpd, (void *)ptr_rx_gpd, sizeof(HAL_TAU_PKT_RX_GPD_T)); + first = FALSE; + } + else + { + ptr_rx_cb->cnt.no_memory++; + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_RX | HAL_TAU_PKT_DBG_ERR), + "u=%u, rxch=%u, alloc 1st sw gpd failed, size=%d\n", + unit, channel, sizeof(HAL_TAU_PKT_RX_SW_GPD_T)); + break; + } + } + else + { + ptr_sw_gpd->ptr_next = (HAL_TAU_PKT_RX_SW_GPD_T *)osal_alloc(sizeof(HAL_TAU_PKT_RX_SW_GPD_T)); + ptr_sw_gpd = ptr_sw_gpd->ptr_next; + if (NULL != ptr_sw_gpd) + { + memcpy(&ptr_sw_gpd->rx_gpd, (void *)ptr_rx_gpd, sizeof(HAL_TAU_PKT_RX_GPD_T)); + } + else + { + ptr_rx_cb->cnt.no_memory++; + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_RX | HAL_TAU_PKT_DBG_ERR), + "u=%u, rxch=%u, alloc mid sw gpd failed, size=%d\n", + unit, channel, sizeof(HAL_TAU_PKT_RX_SW_GPD_T)); + break; + } + } + + ptr_sw_gpd->ptr_cookie = ptr_rx_pdma->pptr_skb_ring[ptr_rx_pdma->cur_idx]; + + /* If hwo=SW and ch=0, enque SW-GPD and signal rxTask */ + if (HAL_TAU_PKT_CH_LAST_GPD == ptr_rx_gpd->ch) + { + last = TRUE; + } + + /* If hwo=SW and ch=*, re-alloc-buf and resume */ + while (NPS_E_OK != _hal_tau_pkt_allocRxPayloadBuf(unit, channel, ptr_rx_pdma->cur_idx)) + { + ptr_rx_cb->cnt.no_memory++; + HAL_TAU_PKT_ALLOC_MEM_RETRY_SLEEP(); + } + ptr_rx_gpd->ioc = HAL_TAU_PKT_IOC_HAS_INTR; + ptr_rx_gpd->hwo = HAL_TAU_PKT_HWO_HW_OWN; + osal_dma_flushCache((void *)ptr_rx_gpd, sizeof(HAL_TAU_PKT_RX_GPD_T)); + + /* Enque the SW-GPD to rxTask */ + if (TRUE == last) + { + ptr_sw_gpd->ptr_next = NULL; + ptr_sw_first_gpd->rx_complete = TRUE; + _hal_tau_pkt_rxEnQueue(unit, channel, ptr_sw_first_gpd); + + /* To rebuild the SW GPD link list */ + first = TRUE; + last = FALSE; + } + + _hal_tau_pkt_resumeRxChannelReg(unit, channel, 1); + + /* update Rx PDMA */ + ptr_rx_pdma->cur_idx++; + ptr_rx_pdma->cur_idx %= ptr_rx_pdma->gpd_num; + loop_cnt--; + } + + osal_giveSemaphore(&ptr_rx_pdma->sema); + + /* update ISR and counter */ + ptr_rx_cb->cnt.channel[channel].rx_done++; + + _hal_tau_pkt_unmaskIntr(unit, HAL_TAU_PKT_RCH_REG(unit, channel)); + + /* prevent this task from executing too long */ + if (!(time_before(jiffies, timeout))) + { + schedule(); + timeout = jiffies + 1; /* continuously rx for 1 tick */ + } + + } while (NPS_E_OK == osal_isRunThread()); + osal_exitRunThread(); +} + +static void +_hal_tau_pkt_net_dev_tx_callback( + const UI32_T unit, + HAL_TAU_PKT_TX_SW_GPD_T *ptr_sw_gpd, + struct sk_buff *ptr_skb) +{ + NPS_ADDR_T phy_addr = 0; + + /* unmap dma */ + phy_addr = NPS_ADDR_32_TO_64(ptr_sw_gpd->tx_gpd.data_buf_addr_hi, 
ptr_sw_gpd->tx_gpd.data_buf_addr_lo);
+    osal_skb_unmapDma(phy_addr, ptr_skb->len, DMA_TO_DEVICE);
+
+    /* free skb */
+    osal_skb_free(ptr_skb);
+
+    /* free gpd */
+    osal_free(ptr_sw_gpd);
+}
+
+/* FUNCTION NAME: hal_tau_pkt_initTask
+ * PURPOSE:
+ *      To initialize the tasks for the packet module.
+ * INPUT:
+ *      unit        -- The unit ID
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK     -- Successfully initialize the tasks.
+ *      NPS_E_OTHERS -- Initialize the tasks failed.
+ * NOTES:
+ *      None
+ */
+NPS_ERROR_NO_T
+hal_tau_pkt_initTask(
+    const UI32_T unit)
+{
+    NPS_ERROR_NO_T          rc = NPS_E_OK;
+    HAL_TAU_PKT_DRV_CB_T    *ptr_cb = HAL_TAU_PKT_GET_DRV_CB_PTR(unit);
+    HAL_TAU_PKT_TX_CB_T     *ptr_tx_cb = HAL_TAU_PKT_GET_TX_CB_PTR(unit);
+    HAL_TAU_PKT_RX_CB_T     *ptr_rx_cb = HAL_TAU_PKT_GET_RX_CB_PTR(unit);
+    UI32_T                  channel = 0;
+
+    if (0 != (ptr_cb->init_flag & HAL_TAU_PKT_INIT_TASK))
+    {
+        HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_ERR,
+                        "u=%u, pkt task init failed, already inited\n", unit);
+        return (rc);
+    }
+
+    /* Init handleErrorTask */
+    rc = osal_createThread("ERROR", HAL_TAU_PKT_ERROR_ISR_STACK_SIZE,
+                           HAL_TAU_PKT_ERROR_ISR_THREAD_PRI, _hal_tau_pkt_handleErrorTask,
+                           (void *)((NPS_HUGE_T)unit), &ptr_cb->err_task_id);
+
+    /* Init handleTxDoneTask */
+    for (channel = 0; ((channel < HAL_TAU_PKT_TX_CHANNEL_LAST) && (NPS_E_OK == rc)); channel++)
+    {
+        ptr_tx_cb->isr_task_cookie[channel].unit    = unit;
+        ptr_tx_cb->isr_task_cookie[channel].channel = channel;
+
+        rc = osal_createThread("TX_ISR", HAL_TAU_PKT_TX_ISR_STACK_SIZE,
+                               HAL_TAU_PKT_TX_ISR_THREAD_PRI, _hal_tau_pkt_handleTxDoneTask,
+                               (void *)&ptr_tx_cb->isr_task_cookie[channel],
+                               &ptr_tx_cb->isr_task_id[channel]);
+    }
+
+    /* Init handleRxDoneTask */
+    for (channel = 0; ((channel < HAL_TAU_PKT_RX_CHANNEL_LAST) && (NPS_E_OK == rc)); channel++)
+    {
+        ptr_rx_cb->isr_task_cookie[channel].unit    = unit;
+        ptr_rx_cb->isr_task_cookie[channel].channel = channel;
+
+        rc = osal_createThread("RX_ISR", HAL_TAU_PKT_RX_ISR_STACK_SIZE,
+                               HAL_TAU_PKT_RX_ISR_THREAD_PRI, _hal_tau_pkt_handleRxDoneTask,
+                               (void *)&ptr_rx_cb->isr_task_cookie[channel],
+                               &ptr_rx_cb->isr_task_id[channel]);
+    }
+
+    /* Init txTask */
+    if (HAL_TAU_PKT_TX_WAIT_ASYNC == ptr_tx_cb->wait_mode)
+    {
+        ptr_tx_cb->running = TRUE;
+    }
+
+    ptr_cb->init_flag |= HAL_TAU_PKT_INIT_TASK;
+
+    HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_COMMON,
+                    "u=%u, pkt task init done, init flag=0x%x\n", unit, ptr_cb->init_flag);
+
+    return (rc);
+}
+
+/* FUNCTION NAME: _hal_tau_pkt_initTxPdma
+ * PURPOSE:
+ *      To initialize the TX PDMA.
+ * INPUT:
+ *      unit        -- The unit ID
+ *      channel     -- The target Tx channel
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK     -- Successfully initialize the TX PDMA.
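+ *      Allocation sketch (illustrative; it condenses the body below): one
+ *      extra GPD is over-allocated so the ring base can be aligned up to the
+ *      GPD size,
+ *          ptr  = osal_dma_alloc((gpd_num + 1) * sizeof(HAL_TAU_PKT_TX_GPD_T));
+ *          base = HAL_TAU_PKT_PDMA_ALIGN_ADDR((NPS_HUGE_T)ptr, sizeof(HAL_TAU_PKT_TX_GPD_T));
+ *      where ptr/base are names used only in this sketch.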
+ * NOTES: + * None + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_initTxPdma( + const UI32_T unit, + const HAL_TAU_PKT_TX_CHANNEL_T channel) +{ + NPS_ERROR_NO_T rc = NPS_E_OK; + HAL_TAU_PKT_TX_CB_T *ptr_tx_cb = HAL_TAU_PKT_GET_TX_CB_PTR(unit); + HAL_TAU_PKT_TX_PDMA_T *ptr_tx_pdma = HAL_TAU_PKT_GET_TX_PDMA_PTR(unit, channel); + NPS_IRQ_FLAGS_T irg_flags; + + /* Isr lock to protect Tx PDMA */ + osal_createIsrLock("TCH_LCK", &ptr_tx_pdma->ring_lock); + + if (HAL_TAU_PKT_TX_WAIT_SYNC_INTR == ptr_tx_cb->wait_mode) + { + /* Sync semaphore to signal sendTxPacket */ + osal_createSemaphore("TCH_SYN", NPS_SEMAPHORE_SYNC, &ptr_tx_pdma->sync_intr_sema); + } + + /* Reset Tx PDMA */ + osal_takeIsrLock(&ptr_tx_pdma->ring_lock, &irg_flags); + + ptr_tx_pdma->used_idx = 0; + ptr_tx_pdma->free_idx = 0; + ptr_tx_pdma->used_gpd_num = 0; + ptr_tx_pdma->free_gpd_num = HAL_TAU_PKT_PDMA_TX_GPD_NUM; + ptr_tx_pdma->gpd_num = HAL_TAU_PKT_PDMA_TX_GPD_NUM; + + /* Prepare the HW-GPD ring */ + ptr_tx_pdma->ptr_gpd_start_addr = (HAL_TAU_PKT_TX_GPD_T *)osal_dma_alloc( + (ptr_tx_pdma->gpd_num + 1) * sizeof(HAL_TAU_PKT_TX_GPD_T)); + + if (NULL != ptr_tx_pdma->ptr_gpd_start_addr) + { + osal_memset(ptr_tx_pdma->ptr_gpd_start_addr, 0x0, + (ptr_tx_pdma->gpd_num + 1) * sizeof(HAL_TAU_PKT_TX_GPD_T)); + + ptr_tx_pdma->ptr_gpd_align_start_addr = (HAL_TAU_PKT_TX_GPD_T *)HAL_TAU_PKT_PDMA_ALIGN_ADDR( + (NPS_HUGE_T)ptr_tx_pdma->ptr_gpd_start_addr, sizeof(HAL_TAU_PKT_TX_GPD_T)); + + rc = _hal_tau_pkt_initTxPdmaRing(unit, channel); + if (NPS_E_OK == rc) + { + _hal_tau_pkt_startTxChannelReg(unit, channel, 0); + } + } + else + { + ptr_tx_cb->cnt.no_memory++; + rc = NPS_E_NO_MEMORY; + } + + if (HAL_TAU_PKT_TX_WAIT_ASYNC == ptr_tx_cb->wait_mode) + { + if (NPS_E_OK == rc) + { + /* Prepare the SW-GPD ring */ + ptr_tx_pdma->pptr_sw_gpd_ring = (HAL_TAU_PKT_TX_SW_GPD_T **)osal_alloc( + ptr_tx_pdma->gpd_num * sizeof(HAL_TAU_PKT_TX_SW_GPD_T *)); + + if (NULL != ptr_tx_pdma->pptr_sw_gpd_ring) + { + osal_memset(ptr_tx_pdma->pptr_sw_gpd_ring, 0x0, + ptr_tx_pdma->gpd_num * sizeof(HAL_TAU_PKT_TX_SW_GPD_T *)); + } + else + { + ptr_tx_cb->cnt.no_memory++; + rc = NPS_E_NO_MEMORY; + } + + /* a temp buffer to store the 1st sw gpd for each packet to be enque + * we cannot enque packet before release a spinlock + */ + ptr_tx_pdma->pptr_sw_gpd_bulk = (HAL_TAU_PKT_TX_SW_GPD_T **)osal_alloc( + ptr_tx_pdma->gpd_num * sizeof(HAL_TAU_PKT_TX_SW_GPD_T *)); + + if (NULL != ptr_tx_pdma->pptr_sw_gpd_bulk) + { + osal_memset(ptr_tx_pdma->pptr_sw_gpd_bulk, 0x0, + ptr_tx_pdma->gpd_num * sizeof(HAL_TAU_PKT_TX_SW_GPD_T *)); + } + else + { + ptr_tx_cb->cnt.no_memory++; + rc = NPS_E_NO_MEMORY; + } + } + } + + osal_giveIsrLock(&ptr_tx_pdma->ring_lock, &irg_flags); + + return (rc); +} + +/* FUNCTION NAME: _hal_tau_pkt_initRxPdma + * PURPOSE: + * To initialize the RX PDMA. + * INPUT: + * unit -- The unit ID + * channel -- The target Rx channel + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully initialize the RX PDMA. 
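+ *      Note (summarizing the body below): unlike Tx, the Rx ring is guarded
+ *      by a binary semaphore instead of an ISR spinlock, and a parallel skb
+ *      pointer ring tracks the payload buffer behind each GPD, conceptually
+ *          rx_gpd[i]  corresponds to  ptr_rx_pdma->pptr_skb_ring[i]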
+ * NOTES:
+ *      None
+ */
+static NPS_ERROR_NO_T
+_hal_tau_pkt_initRxPdma(
+    const UI32_T unit,
+    const HAL_TAU_PKT_RX_CHANNEL_T channel)
+{
+    NPS_ERROR_NO_T          rc = NPS_E_OK;
+    HAL_TAU_PKT_RX_CB_T     *ptr_rx_cb = HAL_TAU_PKT_GET_RX_CB_PTR(unit);
+    HAL_TAU_PKT_RX_PDMA_T   *ptr_rx_pdma = HAL_TAU_PKT_GET_RX_PDMA_PTR(unit, channel);
+
+    /* Binary semaphore to protect Rx PDMA */
+    osal_createSemaphore("RCH_LCK", NPS_SEMAPHORE_BINARY, &ptr_rx_pdma->sema);
+
+    /* Reset Rx PDMA */
+    osal_takeSemaphore(&ptr_rx_pdma->sema, NPS_SEMAPHORE_WAIT_FOREVER);
+    ptr_rx_pdma->cur_idx = 0;
+    ptr_rx_pdma->gpd_num = HAL_TAU_PKT_PDMA_RX_GPD_NUM;
+
+    /* Prepare the HW-GPD ring */
+    ptr_rx_pdma->ptr_gpd_start_addr = (HAL_TAU_PKT_RX_GPD_T *)osal_dma_alloc(
+        (ptr_rx_pdma->gpd_num + 1) * sizeof(HAL_TAU_PKT_RX_GPD_T));
+
+    if (NULL != ptr_rx_pdma->ptr_gpd_start_addr)
+    {
+        osal_memset(ptr_rx_pdma->ptr_gpd_start_addr, 0,
+                    (ptr_rx_pdma->gpd_num + 1) * sizeof(HAL_TAU_PKT_RX_GPD_T));
+
+        ptr_rx_pdma->ptr_gpd_align_start_addr = (HAL_TAU_PKT_RX_GPD_T *)HAL_TAU_PKT_PDMA_ALIGN_ADDR(
+            (NPS_HUGE_T)ptr_rx_pdma->ptr_gpd_start_addr, sizeof(HAL_TAU_PKT_RX_GPD_T));
+
+        /* will initRxPdmaRingBuf and start RCH after setRxConfig */
+        rc = _hal_tau_pkt_initRxPdmaRing(unit, channel);
+    }
+    else
+    {
+        ptr_rx_cb->cnt.no_memory++;
+        rc = NPS_E_NO_MEMORY;
+    }
+
+    if (NPS_E_OK == rc)
+    {
+        /* Prepare the SKB ring */
+        ptr_rx_pdma->pptr_skb_ring = (struct sk_buff **)osal_alloc(
+            ptr_rx_pdma->gpd_num * sizeof(struct sk_buff *));
+
+        if (NULL != ptr_rx_pdma->pptr_skb_ring)
+        {
+            osal_memset(ptr_rx_pdma->pptr_skb_ring, 0x0,
+                        ptr_rx_pdma->gpd_num * sizeof(struct sk_buff *));
+        }
+        else
+        {
+            ptr_rx_cb->cnt.no_memory++;
+            rc = NPS_E_NO_MEMORY;
+        }
+    }
+
+    osal_giveSemaphore(&ptr_rx_pdma->sema);
+
+    return (rc);
+}
+
+/* FUNCTION NAME: _hal_tau_pkt_initPktCb
+ * PURPOSE:
+ *      To initialize the control block of the packet driver.
+ * INPUT:
+ *      unit        -- The unit ID
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK -- Successfully initialize the control block.
+ * NOTES:
+ *      None
+ */
+static NPS_ERROR_NO_T
+_hal_tau_pkt_initPktCb(
+    const UI32_T unit)
+{
+    HAL_TAU_PKT_DRV_CB_T    *ptr_cb = HAL_TAU_PKT_GET_DRV_CB_PTR(unit);
+    UI32_T                  idx = 0, vec = sizeof(_hal_tau_pkt_intr_vec) / sizeof(HAL_TAU_PKT_INTR_VEC_T);
+
+    osal_memset(ptr_cb, 0x0, sizeof(HAL_TAU_PKT_DRV_CB_T));
+
+    /* Register PKT interrupt functions */
+    osal_createIsrLock("ISR_LOCK", &ptr_cb->intr_lock);
+    osal_mdc_registerIsr(unit, _hal_tau_pkt_dispatcher, (void *)((NPS_HUGE_T)unit));
+
+    for (idx = 0; idx < vec; idx++)
+    {
+        osal_createEvent("ISR_EVENT", &_hal_tau_pkt_intr_vec[idx].intr_event);
+        ptr_cb->intr_bitmap |= (_hal_tau_pkt_intr_vec[idx].intr_reg);
+    }
+
+    return (NPS_E_OK);
+}
+
+/* FUNCTION NAME: _hal_tau_pkt_initPktTxCb
+ * PURPOSE:
+ *      To initialize the control block of Tx PDMA.
+ * INPUT:
+ *      unit        -- The unit ID
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK     -- Successfully initialize the control block.
+ *      NPS_E_OTHERS -- Configure failed.
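+ *      Wait-mode summary (condensed from the body below): TX_WAIT_ASYNC sets
+ *      up a SW queue that hands completed first-GPDs to txTask, while
+ *      TX_WAIT_SYNC_POLL disables the per-channel Tx-done interrupts, e.g.
+ *          _hal_tau_pkt_disableIntr(unit, HAL_TAU_PKT_TCH_REG(unit, channel));
+ *      and lets the sender poll for completion instead.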
+ * NOTES: + * None + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_initPktTxCb( + const UI32_T unit) +{ + NPS_ERROR_NO_T rc = NPS_E_OK; + HAL_TAU_PKT_TX_CB_T *ptr_tx_cb = HAL_TAU_PKT_GET_TX_CB_PTR(unit); + HAL_TAU_PKT_TX_CHANNEL_T channel = 0; + + osal_memset(ptr_tx_cb, 0x0, sizeof(HAL_TAU_PKT_TX_CB_T)); + + ptr_tx_cb->wait_mode = HAL_TAU_PKT_TX_WAIT_MODE; + + if (HAL_TAU_PKT_TX_WAIT_ASYNC == ptr_tx_cb->wait_mode) + { + /* Sync semaphore to signal txTask */ + osal_createEvent("TX_SYNC", &ptr_tx_cb->sync_sema); + + /* Initialize Tx GPD-queue (of first SW-GPD) from handleTxDoneTask to txTask */ + ptr_tx_cb->sw_queue.len = HAL_TAU_PKT_TX_QUEUE_LEN; + ptr_tx_cb->sw_queue.weight = 0; + + osal_createSemaphore("TX_QUE", NPS_SEMAPHORE_BINARY, &ptr_tx_cb->sw_queue.sema); + osal_que_create(&ptr_tx_cb->sw_queue.que_id, ptr_tx_cb->sw_queue.len); + } + else if (HAL_TAU_PKT_TX_WAIT_SYNC_POLL == ptr_tx_cb->wait_mode) + { + /* Disable TX done ISR. */ + for (channel = 0; channel < HAL_TAU_PKT_TX_CHANNEL_LAST; channel++) + { + _hal_tau_pkt_disableIntr(unit, HAL_TAU_PKT_TCH_REG(unit, channel)); + } + } + + /* Init Tx PDMA */ + for (channel = 0; ((channel < HAL_TAU_PKT_TX_CHANNEL_LAST) && (NPS_E_OK == rc)); channel++) + { + rc = _hal_tau_pkt_initTxPdma(unit, channel); + } + + return (rc); +} + +/* FUNCTION NAME: _hal_tau_pkt_initPktRxCb + * PURPOSE: + * To initialize the control block of Rx PDMA. + * INPUT: + * unit -- The unit ID + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully initialize the control block. + * NPS_E_OTHERS -- Configure failed. + * NOTES: + * + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_initPktRxCb( + const UI32_T unit) +{ + NPS_ERROR_NO_T rc = NPS_E_OK; + HAL_TAU_PKT_RX_CB_T *ptr_rx_cb = HAL_TAU_PKT_GET_RX_CB_PTR(unit); + HAL_TAU_PKT_RX_CHANNEL_T channel = 0; + UI32_T queue = 0; + + osal_memset(ptr_rx_cb, 0x0, sizeof(HAL_TAU_PKT_RX_CB_T)); + + ptr_rx_cb->sched_mode = HAL_TAU_PKT_RX_SCHED_MODE; + + /* Sync semaphore to signal rxTask */ + osal_createEvent("RX_SYNC", &ptr_rx_cb->sync_sema); + + /* Initialize Rx GPD-queue (of first SW-GPD) from handleRxDoneTask to rxTask */ + for (queue = 0; ((queue < HAL_TAU_PKT_RX_QUEUE_NUM) && (NPS_E_OK == rc)); queue++) + { + ptr_rx_cb->sw_queue[queue].len = HAL_TAU_PKT_RX_QUEUE_LEN; + ptr_rx_cb->sw_queue[queue].weight = HAL_TAU_PKT_RX_QUEUE_WEIGHT; + + osal_createSemaphore("RX_QUE", NPS_SEMAPHORE_BINARY, &ptr_rx_cb->sw_queue[queue].sema); + osal_que_create(&ptr_rx_cb->sw_queue[queue].que_id, ptr_rx_cb->sw_queue[queue].len); + } + + /* Init Rx PDMA */ + for (channel = 0; ((channel < HAL_TAU_PKT_RX_CHANNEL_LAST) && (NPS_E_OK == rc)); channel++) + { + rc = _hal_tau_pkt_initRxPdma(unit, channel); + } + + return (rc); +} + +/* FUNCTION NAME: _hal_tau_pkt_initL1Isr + * PURPOSE: + * To initialize the PDMA L1 ISR configuration. + * INPUT: + * unit -- The unit ID + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successfully initialize the L1 ISR. + * NPS_E_OTHERS -- Configure failed. + * NOTES: + * None + */ +static NPS_ERROR_NO_T +_hal_tau_pkt_initL1Isr( + const UI32_T unit) +{ + UI32_T idx = 0, vec = sizeof(_hal_tau_pkt_intr_vec) / sizeof(HAL_TAU_PKT_INTR_VEC_T); + + for (idx = 0; idx < vec; idx++) + { + _hal_tau_pkt_enableIntr(unit, _hal_tau_pkt_intr_vec[idx].intr_reg); + _hal_tau_pkt_unmaskIntr(unit, _hal_tau_pkt_intr_vec[idx].intr_reg); + } + + return (NPS_E_OK); +} + +/* FUNCTION NAME: _hal_tau_pkt_initL2Isr + * PURPOSE: + * To initialize the PDMA L2 ISR configuration. 
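+ *      (The routine builds one enable bitmap covering the four Rx and four
+ *      Tx channels plus the two Rx error sources, e.g.
+ *          HAL_TAU_PKT_SET_BITMAP(isr_status, HAL_TAU_PKT_L2_ISR_RCH0);
+ *      and then writes it to both PDMA_ERR_INT_EN and PDMA_ERR_INT_MASK_SET.)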
+ * INPUT:
+ *      unit        -- The unit ID
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK     -- Successfully configure the L2 ISR.
+ *      NPS_E_OTHERS -- Configure failed.
+ * NOTES:
+ *      None
+ */
+static NPS_ERROR_NO_T
+_hal_tau_pkt_initL2Isr(
+    const UI32_T unit)
+{
+    HAL_TAU_PKT_L2_ISR_T    isr_status = 0x0;
+
+    HAL_TAU_PKT_SET_BITMAP(isr_status, HAL_TAU_PKT_L2_ISR_RCH0);
+    HAL_TAU_PKT_SET_BITMAP(isr_status, HAL_TAU_PKT_L2_ISR_RCH1);
+    HAL_TAU_PKT_SET_BITMAP(isr_status, HAL_TAU_PKT_L2_ISR_RCH2);
+    HAL_TAU_PKT_SET_BITMAP(isr_status, HAL_TAU_PKT_L2_ISR_RCH3);
+    HAL_TAU_PKT_SET_BITMAP(isr_status, HAL_TAU_PKT_L2_ISR_TCH0);
+    HAL_TAU_PKT_SET_BITMAP(isr_status, HAL_TAU_PKT_L2_ISR_TCH1);
+    HAL_TAU_PKT_SET_BITMAP(isr_status, HAL_TAU_PKT_L2_ISR_TCH2);
+    HAL_TAU_PKT_SET_BITMAP(isr_status, HAL_TAU_PKT_L2_ISR_TCH3);
+    HAL_TAU_PKT_SET_BITMAP(isr_status, HAL_TAU_PKT_L2_ISR_RX_QID_MAP_ERR);
+    HAL_TAU_PKT_SET_BITMAP(isr_status, HAL_TAU_PKT_L2_ISR_RX_FRAME_ERR);
+
+    osal_mdc_writePciReg(unit,
+                         HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_ERR_INT_EN),
+                         &isr_status, sizeof(UI32_T));
+
+    osal_mdc_writePciReg(unit,
+                         HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_ERR_INT_MASK_SET),
+                         &isr_status, sizeof(UI32_T));
+
+    return (NPS_E_OK);
+}
+
+NPS_ERROR_NO_T
+_hal_tau_pkt_resetIosCreditCfg(
+    const UI32_T unit)
+{
+#define HAL_TAU_PKT_PDMA_CREDIT_CFG_RESET_OFFSET    (16)
+
+    UI32_T  credit_cfg = 0x0;
+    UI32_T  idx;
+
+    for (idx = 0; idx < HAL_TAU_PKT_TX_CHANNEL_LAST; idx++)
+    {
+        /* set the credit reset bit for each Tx channel */
+        credit_cfg = (0x1 << HAL_TAU_PKT_PDMA_CREDIT_CFG_RESET_OFFSET);
+        osal_mdc_writePciReg(unit,
+                             HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_CREDIT_CFG(idx)),
+                             &credit_cfg, sizeof(UI32_T));
+    }
+
+    return (NPS_E_OK);
+}
+
+static NPS_ERROR_NO_T
+_hal_tau_pkt_addProfToList(
+    HAL_TAU_PKT_NETIF_PROFILE_T     *ptr_new_profile,
+    HAL_TAU_PKT_PROFILE_NODE_T      **pptr_profile_list)
+{
+    HAL_TAU_PKT_PROFILE_NODE_T      *ptr_new_prof_node;
+    HAL_TAU_PKT_PROFILE_NODE_T      *ptr_prev_node, *ptr_curr_node;
+
+    ptr_new_prof_node = (HAL_TAU_PKT_PROFILE_NODE_T *)osal_alloc(sizeof(HAL_TAU_PKT_PROFILE_NODE_T));
+    ptr_new_prof_node->ptr_profile = ptr_new_profile;
+
+    /* Create the 1st node in the interface profile list */
+    if (NULL == *pptr_profile_list)
+    {
+        HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_PROFILE,
+                        "prof list empty\n");
+        *pptr_profile_list = ptr_new_prof_node;
+        ptr_new_prof_node->ptr_next_node = NULL;
+    }
+    else
+    {
+        ptr_prev_node = *pptr_profile_list;
+        ptr_curr_node = *pptr_profile_list;
+
+        while (ptr_curr_node != NULL)
+        {
+            if (ptr_curr_node->ptr_profile->priority <= ptr_new_profile->priority)
+            {
+                HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_PROFILE,
+                                "find prof id=%d (%s) higher priority=%d, search next\n",
+                                ptr_curr_node->ptr_profile->id,
+                                ptr_curr_node->ptr_profile->name,
+                                ptr_curr_node->ptr_profile->priority);
+                /* Search the next node */
+                ptr_prev_node = ptr_curr_node;
+                ptr_curr_node = ptr_curr_node->ptr_next_node;
+            }
+            else
+            {
+                /* Insert intermediate node */
+                ptr_new_prof_node->ptr_next_node = ptr_curr_node;
+                HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_PROFILE,
+                                "insert prof id=%d (%s) before prof id=%d (%s) (priority=%d >= %d)\n",
+                                ptr_new_prof_node->ptr_profile->id,
+                                ptr_new_prof_node->ptr_profile->name,
+                                ptr_curr_node->ptr_profile->id,
+                                ptr_curr_node->ptr_profile->name,
+                                ptr_new_prof_node->ptr_profile->priority,
+                                ptr_curr_node->ptr_profile->priority);
+
+                if (ptr_prev_node == ptr_curr_node)
+                {
+                    /* There is no previous node: change the root */
+                    *pptr_profile_list = ptr_new_prof_node;
+                    HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_PROFILE,
+                                    "insert prof id=%d (%s) to head (priority=%d)\n",
+                                    ptr_new_prof_node->ptr_profile->id,
+                                    ptr_new_prof_node->ptr_profile->name,
+                                    ptr_new_prof_node->ptr_profile->priority);
+                }
+                else
+                {
+                    ptr_prev_node->ptr_next_node = ptr_new_prof_node;
+                    HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_PROFILE,
+                                    "insert prof id=%d (%s) after prof id=%d (%s) (priority=%d <= %d)\n",
+                                    ptr_new_prof_node->ptr_profile->id,
+                                    ptr_new_prof_node->ptr_profile->name,
+                                    ptr_prev_node->ptr_profile->id,
+                                    ptr_prev_node->ptr_profile->name,
+                                    ptr_new_prof_node->ptr_profile->priority,
+                                    ptr_prev_node->ptr_profile->priority);
+                }
+
+                return (NPS_E_OK);
+            }
+        }
+
+        /* Insert node to the tail of list */
+        ptr_prev_node->ptr_next_node = 
ptr_new_prof_node; + ptr_new_prof_node->ptr_next_node = NULL; + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_PROFILE, + "insert prof id=%d (%s) to tail, after prof id=%d (%s) (priority=%d <= %d)\n", + ptr_new_prof_node->ptr_profile->id, + ptr_new_prof_node->ptr_profile->name, + ptr_prev_node->ptr_profile->id, + ptr_prev_node->ptr_profile->name, + ptr_new_prof_node->ptr_profile->priority, + ptr_prev_node->ptr_profile->priority); + } + + return (NPS_E_OK); +} + +static NPS_ERROR_NO_T +_hal_tau_pkt_addProfToAllIntf( + HAL_TAU_PKT_NETIF_PROFILE_T *ptr_new_profile) +{ + UI32_T port; + HAL_TAU_PKT_NETIF_PORT_DB_T *ptr_port_db; + + for (port = 0; port < HAL_TAU_PKT_MAX_PORT_NUM; port++) + { + ptr_port_db = HAL_TAU_PKT_GET_PORT_DB(port); + /* Shall we check if the interface is ever created in the port?? */ + //if (NULL != ptr_port_db->ptr_net_dev) + if (1) + { + _hal_tau_pkt_addProfToList(ptr_new_profile, &ptr_port_db->ptr_profile_list); + } + } + + return (NPS_E_OK); +} + +static HAL_TAU_PKT_NETIF_PROFILE_T * +_hal_tau_pkt_delProfFromListById( + const UI32_T id, + HAL_TAU_PKT_PROFILE_NODE_T **pptr_profile_list) +{ + HAL_TAU_PKT_PROFILE_NODE_T *ptr_temp_node; + HAL_TAU_PKT_PROFILE_NODE_T *ptr_curr_node, *ptr_prev_node; + HAL_TAU_PKT_NETIF_PROFILE_T *ptr_profile = NULL;; + + if (NULL != *pptr_profile_list) + { + /* Check the 1st node */ + if (id == (*pptr_profile_list)->ptr_profile->id) + { + ptr_profile = (*pptr_profile_list)->ptr_profile; + ptr_temp_node = (*pptr_profile_list); + (*pptr_profile_list) = ptr_temp_node->ptr_next_node; + + if (NULL != ptr_temp_node->ptr_next_node) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_PROFILE, + "choose prof id=%d (%s) as new head\n", + ptr_temp_node->ptr_next_node->ptr_profile->id, + ptr_temp_node->ptr_next_node->ptr_profile->name); + } + else + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_PROFILE, + "prof list is empty\n"); + } + + + osal_free(ptr_temp_node); + } + else + { + ptr_prev_node = *pptr_profile_list; + ptr_curr_node = ptr_prev_node->ptr_next_node; + + while (NULL != ptr_curr_node) + { + if (id != ptr_curr_node->ptr_profile->id) + { + ptr_prev_node = ptr_curr_node; + ptr_curr_node = ptr_curr_node->ptr_next_node; + } + else + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_PROFILE, + "find prof id=%d, free done\n", id); + + ptr_profile = ptr_curr_node->ptr_profile; + ptr_prev_node->ptr_next_node = ptr_curr_node->ptr_next_node; + osal_free(ptr_curr_node); + break; + } + } + } + } + + if (NULL == ptr_profile) + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_PROFILE | HAL_TAU_PKT_DBG_ERR), + "find prof failed, id=%d\n", id); + } + + return (ptr_profile); +} + + +static NPS_ERROR_NO_T +_hal_tau_pkt_delProfFromAllIntfById( + const UI32_T id) +{ + UI32_T port; + HAL_TAU_PKT_NETIF_PORT_DB_T *ptr_port_db; + + for (port = 0; port < HAL_TAU_PKT_MAX_PORT_NUM; port++) + { + ptr_port_db = HAL_TAU_PKT_GET_PORT_DB(port); + /* Shall we check if the interface is ever created in the port?? 
*/
+        //if (NULL != ptr_port_db->ptr_net_dev)
+        if (1)
+        {
+            _hal_tau_pkt_delProfFromListById(id, &ptr_port_db->ptr_profile_list);
+        }
+    }
+    return (NPS_E_OK);
+}
+
+static NPS_ERROR_NO_T
+_hal_tau_pkt_allocProfEntry(
+    HAL_TAU_PKT_NETIF_PROFILE_T *ptr_profile)
+{
+    UI32_T idx;
+
+    for (idx = 0; idx < HAL_TAU_PKT_NET_PROFILE_NUM_MAX; idx++)
+    {
+        if (NULL == _ptr_hal_tau_pkt_profile_entry[idx])
+        {
+            _ptr_hal_tau_pkt_profile_entry[idx] = ptr_profile;
+            ptr_profile->id = idx;
+            return (NPS_E_OK);
+        }
+    }
+    return (NPS_E_TABLE_FULL);
+}
+
+static HAL_TAU_PKT_NETIF_PROFILE_T *
+_hal_tau_pkt_freeProfEntry(
+    const UI32_T id)
+{
+    HAL_TAU_PKT_NETIF_PROFILE_T *ptr_profile = NULL;
+
+    if (id < HAL_TAU_PKT_NET_PROFILE_NUM_MAX)
+    {
+        ptr_profile = _ptr_hal_tau_pkt_profile_entry[id];
+        _ptr_hal_tau_pkt_profile_entry[id] = NULL;
+    }
+
+    return (ptr_profile);
+}
+
+static NPS_ERROR_NO_T
+_hal_tau_pkt_destroyAllIntf(
+    const UI32_T unit)
+{
+    HAL_TAU_PKT_NETIF_PORT_DB_T     *ptr_port_db;
+    UI32_T                          port = 0;
+
+    /* Unregister the net devices by id; although the "id" is now equivalent
+     * to "port", we still perform a search
+     */
+    for (port = 0; port < HAL_TAU_PKT_MAX_PORT_NUM; port++)
+    {
+        ptr_port_db = HAL_TAU_PKT_GET_PORT_DB(port);
+        if (NULL != ptr_port_db->ptr_net_dev) /* valid intf */
+        {
+            HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_INTF,
+                            "u=%u, find intf %s (id=%d) on phy port=%d, destroy done\n",
+                            unit,
+                            ptr_port_db->meta.name,
+                            ptr_port_db->meta.id,
+                            ptr_port_db->meta.port);
+
+            netif_stop_queue(ptr_port_db->ptr_net_dev);
+            unregister_netdev(ptr_port_db->ptr_net_dev);
+            free_netdev(ptr_port_db->ptr_net_dev);
+
+            /* No need to remove the profiles on this port:
+             * the profile is bound to the "port", not the "intf".
+             */
+            /* _hal_tau_pkt_destroyProfList(ptr_port_db->ptr_profile_list); */
+
+            osal_memset(ptr_port_db, 0x0, sizeof(HAL_TAU_PKT_NETIF_PORT_DB_T));
+        }
+    }
+
+    return (NPS_E_OK);
+}
+
+static NPS_ERROR_NO_T
+_hal_tau_pkt_delProfListOnAllIntf(
+    const UI32_T unit)
+{
+    HAL_TAU_PKT_NETIF_PORT_DB_T     *ptr_port_db;
+    UI32_T                          port = 0;
+    HAL_TAU_PKT_PROFILE_NODE_T      *ptr_curr_node, *ptr_next_node;
+
+    /* Walk every port and free the profile node list attached to it */
+    for (port = 0; port < HAL_TAU_PKT_MAX_PORT_NUM; port++)
+    {
+        ptr_port_db = HAL_TAU_PKT_GET_PORT_DB(port);
+        if (NULL != ptr_port_db->ptr_profile_list) /* valid intf */
+        {
+            ptr_curr_node = ptr_port_db->ptr_profile_list;
+            while (NULL != ptr_curr_node)
+            {
+                HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_PROFILE,
+                                "u=%u, del prof id=%d on phy port=%d\n",
+                                unit, ptr_curr_node->ptr_profile->id, port);
+
+                ptr_next_node = ptr_curr_node->ptr_next_node;
+                osal_free(ptr_curr_node);
+                ptr_curr_node = ptr_next_node;
+            }
+        }
+    }
+
+    return (NPS_E_OK);
+}
+
+static NPS_ERROR_NO_T
+_hal_tau_pkt_destroyAllProfile(
+    const UI32_T unit)
+{
+    HAL_TAU_PKT_NETIF_PROFILE_T     *ptr_profile;
+    UI32_T                          prof_id;
+
+    _hal_tau_pkt_delProfListOnAllIntf(unit);
+
+    for (prof_id = 0; prof_id < HAL_TAU_PKT_NET_PROFILE_NUM_MAX; prof_id++)
+    {
+        ptr_profile = _hal_tau_pkt_freeProfEntry(prof_id);
+        if (NULL != ptr_profile)
+        {
+            HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_PROFILE,
+                            "u=%u, destroy prof id=%d, name=%s, priority=%d, flag=0x%x\n",
+                            unit,
+                            ptr_profile->id,
+                            ptr_profile->name,
+                            ptr_profile->priority,
+                            ptr_profile->flags);
+            osal_free(ptr_profile);
+        }
+    }
+
+    return (NPS_E_OK);
+}
+
+/* FUNCTION NAME: hal_tau_pkt_initPktDrv
+ * PURPOSE:
+ *      To invoke the functions to initialize the control block for each
+ *      PDMA subsystem.
+ * INPUT:
+ *      unit        -- The unit ID
+ * OUTPUT:
+ *      None
+ * RETURN:
+ *      NPS_E_OK     -- Successfully initialize the control blocks.
+ *      NPS_E_OTHERS -- Initialize the control blocks failed.
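+ *      Init order (as invoked in the body below):
+ *          _hal_tau_pkt_initPktCb(unit);    -- ISR lock, events, L1 dispatcher
+ *          _hal_tau_pkt_initPktTxCb(unit);  -- Tx PDMA rings per channel
+ *          _hal_tau_pkt_initPktRxCb(unit);  -- Rx PDMA rings per channel
+ *          _hal_tau_pkt_initL1Isr(unit);    -- enable/unmask L1 sources
+ *          _hal_tau_pkt_initL2Isr(unit);    -- enable the PDMA error ISR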
+ * NOTES:
+ *      None
+ */
+NPS_ERROR_NO_T
+hal_tau_pkt_initPktDrv(
+    const UI32_T unit)
+{
+    NPS_ERROR_NO_T          rc = NPS_E_OK;
+    UI32_T                  channel = 0;
+    UI32_T                  flush_intr = 0x0;
+    UI32_T                  clear_intr = 0xffffffff;
+    HAL_TAU_PKT_DRV_CB_T    *ptr_cb = HAL_TAU_PKT_GET_DRV_CB_PTR(unit);
+
+    /* If a chip reset occurs while PDMA Tx is still in flight, the PDMA may
+     * hang and become non-programmable because the current Tx packet gets
+     * stuck when the IOS credit runs too low.
+     * Thus, we always reset the IOS credit value before programming the Tx PDMA.
+     */
+    _hal_tau_pkt_resetIosCreditCfg(unit);
+
+    /* Since users may kill the SDK application without a de-init flow, we
+     * detect whether NETIF was ever initialized and perform the deinit here.
+     * (Users cannot perform Task init while bypassing Drv init, so this
+     * check is required only here.)
+     */
+    if (0 != (ptr_cb->init_flag & HAL_TAU_PKT_INIT_DRV))
+    {
+        HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_ERR,
+                        "u=%u, init pkt drv failed, already inited\n", unit);
+
+        HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_ERR,
+                        "u=%u, stop rx pkt\n", unit);
+        _hal_tau_pkt_rxStop(unit);
+
+        HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_ERR,
+                        "u=%u, stop all intf\n", unit);
+        _hal_tau_pkt_stopAllIntf(unit);
+
+        HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_ERR,
+                        "u=%u, deinit pkt task\n", unit);
+        hal_tau_pkt_deinitTask(unit);
+
+        HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_ERR,
+                        "u=%u, deinit pkt drv\n", unit);
+        hal_tau_pkt_deinitPktDrv(unit);
+
+        HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_ERR,
+                        "u=%u, destroy all prof\n", unit);
+        _hal_tau_pkt_destroyAllProfile(unit);
+
+        HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_ERR,
+                        "u=%u, destroy all intf\n", unit);
+        _hal_tau_pkt_destroyAllIntf(unit);
+    }
+
+    /* [cold-boot] 1. stop DMA channel
+     *             2. disable/mask/clear the interrupt status.
+     */
+    osal_mdc_writePciReg(unit,
+                         HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_ERR_INT_EN),
+                         &flush_intr, sizeof(UI32_T));
+
+    osal_mdc_writePciReg(unit,
+                         HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_ERR_INT_MASK_SET),
+                         &clear_intr, sizeof(UI32_T));
+
+    osal_mdc_writePciReg(unit,
+                         HAL_TAU_PKT_GET_MMIO(HAL_TAU_PKT_PDMA_ERR_INT_CLR),
+                         &clear_intr, sizeof(UI32_T));
+
+    for (channel = 0; channel < HAL_TAU_PKT_TX_CHANNEL_LAST; channel++)
+    {
+        _hal_tau_pkt_stopTxChannelReg(unit, channel);
+        _hal_tau_pkt_maskAllTxL2IsrReg(unit, channel);
+        _hal_tau_pkt_clearTxL2IsrStatusReg(unit, channel, clear_intr);
+    }
+
+    for (channel = 0; channel < HAL_TAU_PKT_RX_CHANNEL_LAST; channel++)
+    {
+        _hal_tau_pkt_stopRxChannelReg(unit, channel);
+        _hal_tau_pkt_maskAllRxL2IsrReg(unit, channel);
+        _hal_tau_pkt_clearRxL2IsrStatusReg(unit, channel, clear_intr);
+    }
+
+    rc = _hal_tau_pkt_initPktCb(unit);
+    if (NPS_E_OK == rc)
+    {
+        rc = _hal_tau_pkt_initPktTxCb(unit);
+    }
+    if (NPS_E_OK == rc)
+    {
+        rc = _hal_tau_pkt_initPktRxCb(unit);
+    }
+    if (NPS_E_OK == rc)
+    {
+        rc = _hal_tau_pkt_initL1Isr(unit);
+    }
+    if (NPS_E_OK == rc)
+    {
+        rc = _hal_tau_pkt_initL2Isr(unit);
+    }
+
+    /* Set the flag to record the init state */
+    ptr_cb->init_flag |= HAL_TAU_PKT_INIT_DRV;
+
+    HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_COMMON,
+                    "u=%u, pkt drv init done, init flag=0x%x\n", unit, ptr_cb->init_flag);
+
+    return (rc);
+}
+
+/* ----------------------------------------------------------------------------------- Init: I/O */
+NPS_ERROR_NO_T
+hal_tau_pkt_getNetDev(
+    const UI32_T unit,
+    const UI32_T port,
+    struct net_device **pptr_net_dev)
+{
+    *pptr_net_dev = HAL_TAU_PKT_GET_PORT_NETDEV(port);
+
+    return (NPS_E_OK);
+}
+
+NPS_ERROR_NO_T
+hal_tau_pkt_prepareGpd(
+    const UI32_T unit,
+    const NPS_ADDR_T phy_addr,
+    const UI32_T len,
+    const UI32_T port,
+
HAL_TAU_PKT_TX_SW_GPD_T *ptr_sw_gpd) +{ + /* fill up tx_gpd */ + ptr_sw_gpd->tx_gpd.data_buf_addr_hi = NPS_ADDR_64_HI(phy_addr); + ptr_sw_gpd->tx_gpd.data_buf_addr_lo = NPS_ADDR_64_LOW(phy_addr); + ptr_sw_gpd->tx_gpd.data_buf_size = len; + ptr_sw_gpd->tx_gpd.chksum = 0x0; + ptr_sw_gpd->tx_gpd.ipc = 0; /* Raw mode, sent to plane 0 */ + ptr_sw_gpd->tx_gpd.prg = HAL_TAU_PKT_PRG_PROCESS_GPD; + ptr_sw_gpd->tx_gpd.hwo = HAL_TAU_PKT_HWO_HW_OWN; + ptr_sw_gpd->tx_gpd.ch = HAL_TAU_PKT_CH_LAST_GPD; + ptr_sw_gpd->tx_gpd.ioc = HAL_TAU_PKT_IOC_HAS_INTR; + ptr_sw_gpd->tx_gpd.pkt_len = len; + ptr_sw_gpd->tx_gpd.crcc = HAL_TAU_PKT_CRCC_SUM_BY_HW; + + /* fill up cpu header */ + ptr_sw_gpd->tx_gpd.itmh_eth.skip_ipp = 1; + ptr_sw_gpd->tx_gpd.itmh_eth.skip_epp = 1; + ptr_sw_gpd->tx_gpd.itmh_eth.color = 0; /* Green */ + ptr_sw_gpd->tx_gpd.itmh_eth.tc = 15; /* Max tc */ + ptr_sw_gpd->tx_gpd.itmh_eth.igr_phy_port = 0; + + ptr_sw_gpd->tx_gpd.pph_l2.mrk_pcp_val = 7; /* Max pcp */ + ptr_sw_gpd->tx_gpd.pph_l2.mrk_pcp_dei_en = 1; + + /* destination index + * 1. to local ETH port + * 2. to remote ETH port + * 3. to remote CPU + */ + ptr_sw_gpd->tx_gpd.itmh_eth.dst_idx = port; + + /* [Taurus] we should set all-1 for the following fields to skip some tm-logic */ + + /* TM header */ + ptr_sw_gpd->tx_gpd.itmh_eth.src_idx = 0x7fff; + ptr_sw_gpd->tx_gpd.itmh_eth.intf_fdid = 0x3fff; + ptr_sw_gpd->tx_gpd.itmh_eth.src_supp_tag = 0x1f; + ptr_sw_gpd->tx_gpd.itmh_eth.nvo3_mgid = 0x6fff; + ptr_sw_gpd->tx_gpd.itmh_eth.nvo3_src_supp_tag_w0 = 0x1; + ptr_sw_gpd->tx_gpd.itmh_eth.nvo3_src_supp_tag_w1 = 0xf; + + /* PP header */ + ptr_sw_gpd->tx_gpd.pph_l2.nvo3_encap_idx = HAL_TAU_INVALID_NVO3_ENCAP_IDX; + ptr_sw_gpd->tx_gpd.pph_l2.nvo3_adj_idx = HAL_TAU_INVALID_NVO3_ADJ_IDX; + + return (NPS_E_OK); +} + +/* ----------------------------------------------------------------------------------- Init: net_dev_ops */ +static int +_hal_tau_pkt_net_dev_init( + struct net_device *ptr_net_dev) +{ + return 0; +} + +static int +_hal_tau_pkt_net_dev_open( + struct net_device *ptr_net_dev) +{ + netif_start_queue(ptr_net_dev); + +#if defined(PERF_EN_TEST) + /* Tx (len, tx_channel, rx_channel, test_skb) */ + perf_test(64, 1, 0, FALSE); + perf_test(64, 2, 0, FALSE); + perf_test(64, 4, 0, FALSE); + + perf_test(1518, 1, 0, FALSE); + perf_test(1518, 2, 0, FALSE); + perf_test(1518, 4, 0, FALSE); + + perf_test(9216, 1, 0, FALSE); + perf_test(9216, 2, 0, FALSE); + perf_test(9216, 4, 0, FALSE); + + /* Rx (len, tx_channel, rx_channel, test_skb) */ + perf_test(64, 0, 1, FALSE); + perf_test(64, 0, 3, FALSE); + perf_test(64, 0, 4, FALSE); + + perf_test(1518, 0, 1, FALSE); + perf_test(1518, 0, 3, FALSE); + perf_test(1518, 0, 4, FALSE); + + perf_test(9216, 0, 1, FALSE); + perf_test(9216, 0, 3, FALSE); + perf_test(9216, 0, 4, FALSE); +#endif + + return 0; +} + +static int +_hal_tau_pkt_net_dev_stop( + struct net_device *ptr_net_dev) +{ + netif_stop_queue(ptr_net_dev); + return 0; +} + +static int +_hal_tau_pkt_net_dev_ioctl( + struct net_device *ptr_net_dev, + struct ifreq *ptr_ifreq, + int cmd) +{ + return 0; +} + +static netdev_tx_t +_hal_tau_pkt_net_dev_tx( + struct sk_buff *ptr_skb, + struct net_device *ptr_net_dev) +{ + struct net_device_priv *ptr_priv = netdev_priv(ptr_net_dev); + /* chip meta */ + unsigned int unit; + unsigned int channel = 0; + HAL_TAU_PKT_TX_SW_GPD_T *ptr_sw_gpd = NULL; + void *ptr_virt_addr = NULL; + NPS_ADDR_T phy_addr = 0x0; + + if (NULL == ptr_priv) + { + /* in case that the netdev has been freed/reset somewhere */ + 
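+
+        /* The normal flow below: pad the skb to ETH_ZLEN plus chip-CRC room,
+         * map it for DMA, wrap it in a single HAL_TAU_PKT_TX_SW_GPD_T via
+         * hal_tau_pkt_prepareGpd(), and hand it to hal_tau_pkt_sendGpd();
+         * on failure every resource is unwound (unmap + free skb + free gpd).
+         */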
HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_ERR, "get netdev_priv failed\n"); + return -EFAULT; + } + + /* check skb */ + if (NULL == ptr_skb) + { + ptr_priv->stats.tx_errors++; + return -EFAULT; + } + + unit = ptr_priv->unit; + + /* pad to 60-bytes if skb_len < 60, see: eth_skb_pad(skb) */ + if (ptr_skb->len < ETH_ZLEN) + { + skb_pad(ptr_skb, ETH_ZLEN - ptr_skb->len); + skb_set_tail_pointer(ptr_skb, ETH_ZLEN); + ptr_skb->len = ETH_ZLEN; + } + + /* pad 4-bytes for chip-crc */ + skb_pad(ptr_skb, ETH_FCS_LEN); + skb_set_tail_pointer(ptr_skb, ETH_FCS_LEN); + ptr_skb->len += ETH_FCS_LEN; + + /* alloc gpd */ + ptr_sw_gpd = osal_alloc(sizeof(HAL_TAU_PKT_TX_SW_GPD_T)); + if (NULL == ptr_sw_gpd) + { + ptr_priv->stats.tx_errors++; + osal_skb_free(ptr_skb); + } + else + { + /* map skb to dma */ + ptr_virt_addr = ptr_skb->data; + phy_addr = osal_skb_mapDma(ptr_skb, DMA_TO_DEVICE); + if (0x0 == phy_addr) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_ERR, "u=%u, txch=%u, skb dma map err\n", + unit, channel); + ptr_priv->stats.tx_errors++; + osal_skb_free(ptr_skb); + osal_free(ptr_sw_gpd); + } + else + { + /* trans skb to gpd */ + memset(ptr_sw_gpd, 0x0, sizeof(HAL_TAU_PKT_TX_SW_GPD_T)); + ptr_sw_gpd->callback = (void *)_hal_tau_pkt_net_dev_tx_callback; + ptr_sw_gpd->ptr_cookie = (void *)ptr_skb; + ptr_sw_gpd->gpd_num = 1; + ptr_sw_gpd->ptr_next = NULL; + ptr_sw_gpd->channel = channel; + + /* prepare gpd */ + hal_tau_pkt_prepareGpd(unit, phy_addr, ptr_skb->len, ptr_priv->port, ptr_sw_gpd); + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(4,6,7) + ptr_net_dev->trans_start = jiffies; +#else + netdev_get_tx_queue(ptr_net_dev, 0)->trans_start = jiffies; +#endif + /* send gpd */ + if (NPS_E_OK == hal_tau_pkt_sendGpd(unit, channel, ptr_sw_gpd)) + { + ptr_priv->stats.tx_packets++; + ptr_priv->stats.tx_bytes += ptr_skb->len; + } + else + { + ptr_priv->stats.tx_fifo_errors++; /* to record the extreme cases where packets are dropped */ + ptr_priv->stats.tx_dropped++; + + osal_skb_unmapDma(phy_addr, ptr_skb->len, DMA_TO_DEVICE); + osal_skb_free(ptr_skb); + osal_free(ptr_sw_gpd); + + return NETDEV_TX_OK; + } + } + } + + return NETDEV_TX_OK; +} + +static void +_hal_tau_pkt_net_dev_tx_timeout( + struct net_device *ptr_net_dev) +{ + netif_stop_queue(ptr_net_dev); + osal_sleepThread(1000); + netif_wake_queue(ptr_net_dev); +} + +static struct net_device_stats * +_hal_tau_pkt_net_dev_get_stats( + struct net_device *ptr_net_dev) +{ + struct net_device_priv *ptr_priv = netdev_priv(ptr_net_dev); + + return (&ptr_priv->stats); +} + +static int +_hal_tau_pkt_net_dev_set_mtu( + struct net_device *ptr_net_dev, + int new_mtu) +{ + if (new_mtu < 64 || new_mtu > 9216) + { + return -EINVAL; + } + ptr_net_dev->mtu = new_mtu; /* This mtu need to be synced to chip's */ + return 0; +} + +static int +_hal_tau_pkt_net_dev_set_mac( + struct net_device *ptr_net_dev, + void *ptr_mac_addr) +{ + struct sockaddr *ptr_addr = ptr_mac_addr; + + memcpy(ptr_net_dev->dev_addr, ptr_addr->sa_data, ptr_net_dev->addr_len); + return 0; +} + +static void +_hal_tau_pkt_net_dev_set_rx_mode( + struct net_device *ptr_dev) +{ + if (ptr_dev->flags & IFF_PROMISC) + { + } + else + { + if (ptr_dev->flags & IFF_ALLMULTI) + { + } + else + { + if (netdev_mc_empty(ptr_dev)) + { + return; + } + } + } +} + +static struct net_device_ops _hal_tau_pkt_net_dev_ops = +{ + .ndo_init = _hal_tau_pkt_net_dev_init, + .ndo_open = _hal_tau_pkt_net_dev_open, + .ndo_stop = _hal_tau_pkt_net_dev_stop, + .ndo_do_ioctl = _hal_tau_pkt_net_dev_ioctl, + .ndo_start_xmit = _hal_tau_pkt_net_dev_tx, + .ndo_tx_timeout = 
_hal_tau_pkt_net_dev_tx_timeout, + .ndo_get_stats = _hal_tau_pkt_net_dev_get_stats, + .ndo_change_mtu = _hal_tau_pkt_net_dev_set_mtu, + .ndo_set_mac_address = _hal_tau_pkt_net_dev_set_mac, + .ndo_set_rx_mode = _hal_tau_pkt_net_dev_set_rx_mode, +}; + +static int +_hal_tau_pkt_net_dev_ethtool_get( + struct net_device *ptr_dev, + struct ethtool_cmd *ptr_cmd) +{ + struct net_device_priv *ptr_priv; + + ptr_cmd->supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE; + ptr_cmd->port = PORT_FIBRE; + ptr_cmd->duplex = DUPLEX_FULL; + + ptr_priv = netdev_priv(ptr_dev); + ethtool_cmd_speed_set(ptr_cmd, ptr_priv->speed); + + return 0; +} + +static struct ethtool_ops _hal_tau_pkt_net_dev_ethtool_ops = +{ + .get_settings = _hal_tau_pkt_net_dev_ethtool_get, + .get_link = ethtool_op_get_link, +}; + +static void +_hal_tau_pkt_setup( + struct net_device *ptr_net_dev) +{ + struct net_device_priv *ptr_priv = netdev_priv(ptr_net_dev); + + /* setup net device */ + ether_setup(ptr_net_dev); + ptr_net_dev->netdev_ops = &_hal_tau_pkt_net_dev_ops; + ptr_net_dev->ethtool_ops = &_hal_tau_pkt_net_dev_ethtool_ops; + ptr_net_dev->watchdog_timeo = HAL_TAU_PKT_TX_TIMEOUT; + ptr_net_dev->mtu = HAL_TAU_PKT_MAX_ETH_FRAME_SIZE; /* This mtu need to be synced to chip's */ + random_ether_addr(ptr_net_dev->dev_addr); /* Please use the mac-addr of interface. */ + + /* setup private data */ + ptr_priv->ptr_net_dev = ptr_net_dev; + memset(&ptr_priv->stats, 0, sizeof(struct net_device_stats)); +} + +static void +_hal_tau_pkt_lockRxChannelAll( + const UI32_T unit) +{ + UI32_T rch; + HAL_TAU_PKT_RX_PDMA_T *ptr_rx_pdma; + + for (rch = 0; rch < HAL_TAU_PKT_RX_CHANNEL_LAST; rch++) + { + ptr_rx_pdma = HAL_TAU_PKT_GET_RX_PDMA_PTR(unit, rch); + osal_takeSemaphore(&ptr_rx_pdma->sema, NPS_SEMAPHORE_WAIT_FOREVER); + } +} + +static void +_hal_tau_pkt_unlockRxChannelAll( + const UI32_T unit) +{ + UI32_T rch; + HAL_TAU_PKT_RX_PDMA_T *ptr_rx_pdma; + + for (rch = 0; rch < HAL_TAU_PKT_RX_CHANNEL_LAST; rch++) + { + ptr_rx_pdma = HAL_TAU_PKT_GET_RX_PDMA_PTR(unit, rch); + osal_giveSemaphore(&ptr_rx_pdma->sema); + } +} + +static NPS_ERROR_NO_T +_hal_tau_pkt_createIntf( + const UI32_T unit, + HAL_TAU_PKT_IOCTL_NETIF_COOKIE_T *ptr_cookie) +{ + HAL_TAU_PKT_NETIF_INTF_T net_intf = {0}; + HAL_TAU_PKT_NETIF_PORT_DB_T *ptr_port_db; + struct net_device *ptr_net_dev = NULL; + struct net_device_priv *ptr_priv = NULL; + NPS_ERROR_NO_T rc = NPS_E_OK; + + /* Lock all Rx tasks to avoid any access to the intf during packet processing */ + /* Only Rx tasks are locked since Tx action is performed under a spinlock protection */ + _hal_tau_pkt_lockRxChannelAll(unit); + + osal_io_copyFromUser(&net_intf, &ptr_cookie->net_intf, sizeof(HAL_TAU_PKT_NETIF_INTF_T)); + + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_INTF, "u=%u, create intf name=%s, phy port=%d\n", + unit, net_intf.name, net_intf.port); + + /* To check if the interface with the same name exists in kernel */ + ptr_net_dev = dev_get_by_name(&init_net, net_intf.name); + if (NULL != ptr_net_dev) + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_ERR | HAL_TAU_PKT_DBG_INTF), + "u=%u, create intf failed, exist same name=%s\n", + unit, net_intf.name); + + dev_put(ptr_net_dev); + +#if defined(HAL_TAU_PKT_FORCR_REMOVE_DUPLICATE_NETDEV) + ptr_net_dev->operstate = IF_OPER_DOWN; + netif_carrier_off(ptr_net_dev); + netif_stop_queue(ptr_net_dev); + unregister_netdev(ptr_net_dev); + free_netdev(ptr_net_dev); +#endif + _hal_tau_pkt_unlockRxChannelAll(unit); + return (NPS_E_ENTRY_EXISTS); + } + + /* Bind the net dev and intf meta data to internel 
port-based array */
+    ptr_port_db = HAL_TAU_PKT_GET_PORT_DB(net_intf.port);
+    if (ptr_port_db->ptr_net_dev == NULL)
+    {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+        ptr_net_dev = alloc_netdev(sizeof(struct net_device_priv),
+                                   net_intf.name, NET_NAME_UNKNOWN, _hal_tau_pkt_setup);
+#else
+        ptr_net_dev = alloc_netdev(sizeof(struct net_device_priv),
+                                   net_intf.name, _hal_tau_pkt_setup);
+#endif
+        memcpy(ptr_net_dev->dev_addr, net_intf.mac, ptr_net_dev->addr_len);
+
+        ptr_priv = netdev_priv(ptr_net_dev);
+
+        /* Port info will be used when a packet is sent from this netdev */
+        ptr_priv->port = net_intf.port;
+        ptr_priv->id   = net_intf.port;
+        ptr_priv->unit = unit;
+
+        register_netdev(ptr_net_dev);
+
+        net_intf.id = net_intf.port; /* Currently, id is 1-to-1 mapped to port */
+        osal_memcpy(&ptr_port_db->meta, &net_intf, sizeof(HAL_TAU_PKT_NETIF_INTF_T));
+
+        ptr_port_db->ptr_net_dev = ptr_net_dev;
+
+        /* Copy the intf-id to user space */
+        osal_io_copyToUser(&ptr_cookie->net_intf, &net_intf, sizeof(HAL_TAU_PKT_NETIF_INTF_T));
+    }
+    else
+    {
+        HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_INTF | HAL_TAU_PKT_DBG_ERR),
+                        "u=%u, create intf failed, exist on phy port=%d\n",
+                        unit, net_intf.port);
+        /* The user needs to delete the existing intf bound to the same port */
+        rc = NPS_E_ENTRY_EXISTS;
+    }
+
+    osal_io_copyToUser(&ptr_cookie->rc, &rc, sizeof(NPS_ERROR_NO_T));
+
+    _hal_tau_pkt_unlockRxChannelAll(unit);
+
+    return (NPS_E_OK);
+}
+
+static NPS_ERROR_NO_T
+_hal_tau_pkt_destroyIntf(
+    const UI32_T unit,
+    HAL_TAU_PKT_IOCTL_NETIF_COOKIE_T *ptr_cookie)
+{
+    HAL_TAU_PKT_NETIF_INTF_T        net_intf = {0};
+    HAL_TAU_PKT_NETIF_PORT_DB_T     *ptr_port_db;
+    UI32_T                          port = 0;
+    NPS_ERROR_NO_T                  rc = NPS_E_ENTRY_NOT_FOUND;
+
+    /* Lock all Rx tasks to avoid any access to the intf during packet processing */
+    /* Only Rx tasks are locked since the Tx action is performed under spinlock protection */
+    _hal_tau_pkt_lockRxChannelAll(unit);
+
+    osal_io_copyFromUser(&net_intf, &ptr_cookie->net_intf, sizeof(HAL_TAU_PKT_NETIF_INTF_T));
+
+    /* Unregister the net device by id; although the "id" is now equivalent
+     * to "port", we still perform a search
+     */
+    for (port = 0; port < HAL_TAU_PKT_MAX_PORT_NUM; port++)
+    {
+        ptr_port_db = HAL_TAU_PKT_GET_PORT_DB(port);
+        if (NULL != ptr_port_db->ptr_net_dev) /* valid intf */
+        {
+            if (ptr_port_db->meta.id == net_intf.id)
+            {
+                HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_INTF,
+                                "u=%u, find intf %s (id=%d) on phy port=%d, destroy done\n",
+                                unit,
+                                ptr_port_db->meta.name,
+                                ptr_port_db->meta.id,
+                                ptr_port_db->meta.port);
+
+                netif_carrier_off(ptr_port_db->ptr_net_dev);
+                netif_stop_queue(ptr_port_db->ptr_net_dev);
+                unregister_netdev(ptr_port_db->ptr_net_dev);
+                free_netdev(ptr_port_db->ptr_net_dev);
+
+                /* No need to remove the profiles on this port:
+                 * the profile is bound to the "port", not the "intf".
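+                 * (The profile memory itself lives in the global table
+                 * _ptr_hal_tau_pkt_profile_entry[] and is released only via
+                 * _hal_tau_pkt_freeProfEntry() on a destroy-profile request.)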
+ */ + /* _hal_tau_pkt_destroyProfList(ptr_port_db->ptr_profile_list); */ + + osal_memset(ptr_port_db, 0x0, sizeof(HAL_TAU_PKT_NETIF_PORT_DB_T)); + rc = NPS_E_OK; + break; + } + } + } + + osal_io_copyToUser(&ptr_cookie->rc, &rc, sizeof(NPS_ERROR_NO_T)); + + _hal_tau_pkt_unlockRxChannelAll(unit); + + return (NPS_E_OK); +} + +static NPS_ERROR_NO_T +_hal_tau_pkt_traverseProfList( + UI32_T intf_id, + HAL_TAU_PKT_PROFILE_NODE_T *ptr_prof_list) +{ + HAL_TAU_PKT_PROFILE_NODE_T *ptr_curr_node; + + ptr_curr_node = ptr_prof_list; + + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_INTF, "intf id=%d, prof list=\n", intf_id); + while(NULL != ptr_curr_node) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_INTF, "%s (%d) => ", + ptr_curr_node->ptr_profile->name, + ptr_curr_node->ptr_profile->priority); + ptr_curr_node = ptr_curr_node->ptr_next_node; + } + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_INTF, "null\n"); + return (NPS_E_OK); +} + + +static NPS_ERROR_NO_T +_hal_tau_pkt_getIntf( + const UI32_T unit, + HAL_TAU_PKT_IOCTL_NETIF_COOKIE_T *ptr_cookie) +{ + HAL_TAU_PKT_NETIF_INTF_T net_intf = {0}; + HAL_TAU_PKT_NETIF_PORT_DB_T *ptr_port_db; + UI32_T port = 0; + NPS_ERROR_NO_T rc = NPS_E_ENTRY_NOT_FOUND; + + osal_io_copyFromUser(&net_intf, &ptr_cookie->net_intf, sizeof(HAL_TAU_PKT_NETIF_INTF_T)); + + for (port = 0; port < HAL_TAU_PKT_MAX_PORT_NUM; port++) + { + ptr_port_db = HAL_TAU_PKT_GET_PORT_DB(port); + if (NULL != ptr_port_db->ptr_net_dev) /* valid intf */ + { + if (ptr_port_db->meta.id == net_intf.id) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_INTF, "u=%u, find intf id=%d\n", unit, net_intf.id); + _hal_tau_pkt_traverseProfList(net_intf.id, ptr_port_db->ptr_profile_list); + osal_io_copyToUser(&ptr_cookie->net_intf, &ptr_port_db->meta, sizeof(HAL_TAU_PKT_NETIF_INTF_T)); + rc = NPS_E_OK; + break; + } + } + } + + osal_io_copyToUser(&ptr_cookie->rc, &rc, sizeof(NPS_ERROR_NO_T)); + + return (NPS_E_OK); +} + +static HAL_TAU_PKT_NETIF_PROFILE_T * +_hal_tau_pkt_getProfEntry( + const UI32_T id) +{ + HAL_TAU_PKT_NETIF_PROFILE_T *ptr_profile = NULL; + + if (id < HAL_TAU_PKT_NET_PROFILE_NUM_MAX) + { + if (NULL != _ptr_hal_tau_pkt_profile_entry[id]) + { + ptr_profile = _ptr_hal_tau_pkt_profile_entry[id]; + } + } + + return (ptr_profile); +} + +static NPS_ERROR_NO_T +_hal_tau_pkt_createProfile( + const UI32_T unit, + HAL_TAU_PKT_IOCTL_NETIF_COOKIE_T *ptr_cookie) +{ + HAL_TAU_PKT_NETIF_PROFILE_T *ptr_profile; + HAL_TAU_PKT_NETIF_PORT_DB_T *ptr_port_db; + NPS_ERROR_NO_T rc; + + /* Lock all Rx tasks to avoid profiles being refered during packet processing */ + /* Need to lock all Rx tasks since packets from all Rx channels do profile lookup */ + _hal_tau_pkt_lockRxChannelAll(unit); + + ptr_profile = osal_alloc(sizeof(HAL_TAU_PKT_NETIF_PROFILE_T)); + osal_io_copyFromUser(ptr_profile, &ptr_cookie->net_profile, + sizeof(HAL_TAU_PKT_NETIF_PROFILE_T)); + + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_PROFILE, + "u=%u, create prof name=%s, priority=%d, flag=0x%x\n", + unit, + ptr_profile->name, + ptr_profile->priority, + ptr_profile->flags); + + /* Save the profile to the profile array and assign the index to ptr_profile->id */ + rc = _hal_tau_pkt_allocProfEntry(ptr_profile); + if (NPS_E_OK == rc) + { + /* Insert the profile to the corresponding (port) interface */ + if ((ptr_profile->flags & HAL_TAU_PKT_NETIF_PROFILE_FLAGS_PORT) != 0) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_PROFILE, + "u=%u, bind prof to phy port=%d\n", unit, ptr_profile->port); + ptr_port_db = HAL_TAU_PKT_GET_PORT_DB(ptr_profile->port); + _hal_tau_pkt_addProfToList(ptr_profile, 
&ptr_port_db->ptr_profile_list); + } + else + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_PROFILE, + "u=%u, bind prof to all intf\n", unit); + _hal_tau_pkt_addProfToAllIntf(ptr_profile); + } + + /* Copy the ptr_profile->id to user space */ + osal_io_copyToUser(&ptr_cookie->net_profile, ptr_profile, sizeof(HAL_TAU_PKT_NETIF_PROFILE_T)); + } + else + { + HAL_TAU_PKT_DBG((HAL_TAU_PKT_DBG_PROFILE | HAL_TAU_PKT_DBG_ERR), + "u=%u, alloc prof entry failed, tbl full\n", unit); + osal_free(ptr_profile); + } + + osal_io_copyToUser(&ptr_cookie->rc, &rc, sizeof(NPS_ERROR_NO_T)); + + _hal_tau_pkt_unlockRxChannelAll(unit); + + return (NPS_E_OK); +} + +static NPS_ERROR_NO_T +_hal_tau_pkt_destroyProfile( + const UI32_T unit, + HAL_TAU_PKT_IOCTL_NETIF_COOKIE_T *ptr_cookie) +{ + HAL_TAU_PKT_NETIF_PROFILE_T profile = {0}; + HAL_TAU_PKT_NETIF_PROFILE_T *ptr_profile; + NPS_ERROR_NO_T rc = NPS_E_OK; + + /* Lock all Rx tasks to avoid profiles being refered during packet processing */ + /* Need to lock all Rx tasks since packets from all Rx channels do profile lookup */ + _hal_tau_pkt_lockRxChannelAll(unit); + + osal_io_copyFromUser(&profile, &ptr_cookie->net_profile, + sizeof(HAL_TAU_PKT_NETIF_PROFILE_T)); + + /* Remove the profile from corresponding interface (port) */ + _hal_tau_pkt_delProfFromAllIntfById(profile.id); + + ptr_profile = _hal_tau_pkt_freeProfEntry(profile.id); + if (NULL != ptr_profile) + { + HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_PROFILE, + "u=%u, destroy prof id=%d, name=%s, priority=%d, flag=0x%x\n", + unit, + ptr_profile->id, + ptr_profile->name, + ptr_profile->priority, + ptr_profile->flags); + osal_free(ptr_profile); + } + + osal_io_copyToUser(&ptr_cookie->rc, &rc, sizeof(NPS_ERROR_NO_T)); + + _hal_tau_pkt_unlockRxChannelAll(unit); + + return (NPS_E_OK); +} + +static NPS_ERROR_NO_T +_hal_tau_pkt_getProfile( + const UI32_T unit, + HAL_TAU_PKT_IOCTL_NETIF_COOKIE_T *ptr_cookie) +{ + HAL_TAU_PKT_NETIF_PROFILE_T profile = {0}; + HAL_TAU_PKT_NETIF_PROFILE_T *ptr_profile; + NPS_ERROR_NO_T rc = NPS_E_OK; + + osal_io_copyFromUser(&profile, &ptr_cookie->net_profile, sizeof(HAL_TAU_PKT_NETIF_PROFILE_T)); + + ptr_profile = _hal_tau_pkt_getProfEntry(profile.id); + if (NULL != ptr_profile) + { + osal_io_copyToUser(&ptr_cookie->net_profile, ptr_profile, sizeof(HAL_TAU_PKT_NETIF_PROFILE_T)); + } + else + { + rc = NPS_E_ENTRY_NOT_FOUND; + } + + osal_io_copyToUser(&ptr_cookie->rc, &rc, sizeof(NPS_ERROR_NO_T)); + + return (NPS_E_OK); +} + +static NPS_ERROR_NO_T +_hal_tau_pkt_getIntfCnt( + const UI32_T unit, + HAL_TAU_PKT_IOCTL_NETIF_COOKIE_T *ptr_cookie) +{ + HAL_TAU_PKT_NETIF_INTF_T net_intf = {0}; + HAL_TAU_PKT_NETIF_INTF_CNT_T intf_cnt = {0}; + HAL_TAU_PKT_NETIF_PORT_DB_T *ptr_port_db; + struct net_device_priv *ptr_priv; + UI32_T port = 0; + NPS_ERROR_NO_T rc = NPS_E_ENTRY_NOT_FOUND; + + osal_io_copyFromUser(&net_intf, &ptr_cookie->net_intf, sizeof(HAL_TAU_PKT_NETIF_INTF_T)); + + for (port = 0; port < HAL_TAU_PKT_MAX_PORT_NUM; port++) + { + ptr_port_db = HAL_TAU_PKT_GET_PORT_DB(port); + if (NULL != ptr_port_db->ptr_net_dev) /* valid intf */ + { + if (ptr_port_db->meta.id == net_intf.id) + { + ptr_priv = netdev_priv(ptr_port_db->ptr_net_dev); + intf_cnt.rx_pkt = ptr_priv->stats.rx_packets; + intf_cnt.tx_pkt = ptr_priv->stats.tx_packets; + intf_cnt.tx_error = ptr_priv->stats.tx_errors; + intf_cnt.tx_queue_full = ptr_priv->stats.tx_fifo_errors; + + rc = NPS_E_OK; + break; + } + } + } + + osal_io_copyToUser(&ptr_cookie->cnt, &intf_cnt, sizeof(HAL_TAU_PKT_NETIF_INTF_CNT_T)); + osal_io_copyToUser(&ptr_cookie->rc, &rc, 
sizeof(NPS_ERROR_NO_T)); + + return (NPS_E_OK); +} + +static NPS_ERROR_NO_T +_hal_tau_pkt_clearIntfCnt( + const UI32_T unit, + HAL_TAU_PKT_IOCTL_NETIF_COOKIE_T *ptr_cookie) +{ + HAL_TAU_PKT_NETIF_INTF_T net_intf = {0}; + HAL_TAU_PKT_NETIF_PORT_DB_T *ptr_port_db; + struct net_device_priv *ptr_priv; + UI32_T port = 0; + NPS_ERROR_NO_T rc = NPS_E_ENTRY_NOT_FOUND; + + osal_io_copyFromUser(&net_intf, &ptr_cookie->net_intf, sizeof(HAL_TAU_PKT_NETIF_INTF_T)); + + for (port = 0; port < HAL_TAU_PKT_MAX_PORT_NUM; port++) + { + ptr_port_db = HAL_TAU_PKT_GET_PORT_DB(port); + if (NULL != ptr_port_db->ptr_net_dev) /* valid intf */ + { + if (ptr_port_db->meta.id == net_intf.id) + { + ptr_priv = netdev_priv(ptr_port_db->ptr_net_dev); + ptr_priv->stats.rx_packets = 0; + ptr_priv->stats.tx_packets = 0; + ptr_priv->stats.tx_errors = 0; + ptr_priv->stats.tx_fifo_errors = 0; + + rc = NPS_E_OK; + break; + } + } + } + + osal_io_copyToUser(&ptr_cookie->rc, &rc, sizeof(NPS_ERROR_NO_T)); + + return (NPS_E_OK); +} + +/* ----------------------------------------------------------------------------------- Init: dev_ops */ +static int +_hal_tau_pkt_dev_open( + struct inode *inode, + struct file *file) +{ + return (0); +} + +static int +_hal_tau_pkt_dev_close( + struct inode *inode, + struct file *file) +{ + return (0); +} + +static void +_hal_tau_pkt_dev_tx_callback( + const UI32_T unit, + HAL_TAU_PKT_TX_SW_GPD_T *ptr_sw_gpd, + HAL_TAU_PKT_TX_SW_GPD_T *ptr_sw_gpd_usr) +{ + UI32_T channel = ptr_sw_gpd->channel; + HAL_TAU_PKT_TX_CB_T *ptr_tx_cb = HAL_TAU_PKT_GET_TX_CB_PTR(unit); + + while (0 != _hal_tau_pkt_enQueue(&ptr_tx_cb->sw_queue, ptr_sw_gpd)) + { + ptr_tx_cb->cnt.channel[channel].enque_retry++; + HAL_TAU_PKT_TX_ENQUE_RETRY_SLEEP(); + } + ptr_tx_cb->cnt.channel[channel].enque_ok++; + + osal_triggerEvent(&ptr_tx_cb->sync_sema); + ptr_tx_cb->cnt.channel[channel].trig_event++; +} + +static ssize_t +_hal_tau_pkt_dev_tx( + struct file *file, + const char __user *buf, + size_t count, + loff_t *pos) +{ + int ret = 0; + int idx = 0; + unsigned int unit = 0; + unsigned int channel = 0; + HAL_TAU_PKT_IOCTL_TX_COOKIE_T tx_cookie; + HAL_TAU_PKT_IOCTL_TX_GPD_T ioctl_gpd; + HAL_TAU_PKT_TX_SW_GPD_T *ptr_sw_gpd_knl = NULL; + HAL_TAU_PKT_TX_SW_GPD_T *ptr_first_sw_gpd_knl = NULL; + + /* copy the tx-cookie */ + osal_io_copyFromUser(&tx_cookie, (void *)buf, sizeof(HAL_TAU_PKT_IOCTL_TX_COOKIE_T)); + + unit = tx_cookie.unit; + channel = tx_cookie.channel; + + ptr_sw_gpd_knl = osal_alloc(sizeof(HAL_TAU_PKT_TX_SW_GPD_T)); + ptr_first_sw_gpd_knl = ptr_sw_gpd_knl; + + /* create SW GPD based on the content of each IOCTL GPD */ + while (1) + { + osal_io_copyFromUser(&ioctl_gpd, + ((void *)((NPS_HUGE_T)tx_cookie.ioctl_gpd_addr)) + +idx*sizeof(HAL_TAU_PKT_IOCTL_TX_GPD_T), + sizeof(HAL_TAU_PKT_IOCTL_TX_GPD_T)); + + ptr_sw_gpd_knl->channel = ioctl_gpd.channel; + ptr_sw_gpd_knl->gpd_num = ioctl_gpd.gpd_num; + ptr_sw_gpd_knl->ptr_cookie = (void *)ioctl_gpd.cookie; + + /* directly copy user's HW GPD */ + osal_io_copyFromUser(&ptr_sw_gpd_knl->tx_gpd, + (void *)((NPS_HUGE_T)ioctl_gpd.hw_gpd_addr), + sizeof(HAL_TAU_PKT_TX_GPD_T)); + + /* replace the callback */ + ptr_sw_gpd_knl->callback = (void *)_hal_tau_pkt_dev_tx_callback; + + /* save the first SW GPD address from userspace since + * we have replaced the original callback + */ + ptr_sw_gpd_knl->ptr_cookie = (void *)ioctl_gpd.sw_gpd_addr; + + if (HAL_TAU_PKT_CH_LAST_GPD == ptr_sw_gpd_knl->tx_gpd.ch) + { + ptr_sw_gpd_knl->ptr_next = NULL; + break; + } + else + { + ptr_sw_gpd_knl->ptr_next = 
+            ptr_sw_gpd_knl = ptr_sw_gpd_knl->ptr_next;
+            idx++;
+        }
+    }
+
+    ret = hal_tau_pkt_sendGpd(unit, channel, ptr_first_sw_gpd_knl);
+    if (NPS_E_OK != ret)
+    {
+        _hal_tau_pkt_freeTxGpdList(unit, ptr_first_sw_gpd_knl);
+    }
+
+    /* return 0 if success */
+    return (ret);
+}
+
+static ssize_t
+_hal_tau_pkt_dev_rx(
+    struct file *file,
+    char __user *buf,
+    size_t count,
+    loff_t *pos)
+{
+    return (0);
+}
+
+static long
+_hal_tau_pkt_dev_ioctl(
+    struct file *filp,
+    unsigned int cmd,
+    unsigned long arg)
+{
+    int ret = 0;
+
+    /* cmd */
+    HAL_TAU_PKT_IOCTL_CMD_T *ptr_cmd = (HAL_TAU_PKT_IOCTL_CMD_T *)&cmd;
+    unsigned int unit = ptr_cmd->field.unit;
+    HAL_TAU_PKT_IOCTL_TYPE_T type = ptr_cmd->field.type;
+
+    HAL_TAU_PKT_DBG(HAL_TAU_PKT_DBG_COMMON, "u=%u, ioctl type=%u, cmd=%u\n",
+                    unit, type, cmd);
+
+    switch (type)
+    {
+        /* network interface */
+        case HAL_TAU_PKT_IOCTL_TYPE_CREATE_INTF:
+            ret = _hal_tau_pkt_createIntf(unit, (HAL_TAU_PKT_IOCTL_NETIF_COOKIE_T *)arg);
+            break;
+
+        case HAL_TAU_PKT_IOCTL_TYPE_DESTROY_INTF:
+            ret = _hal_tau_pkt_destroyIntf(unit, (HAL_TAU_PKT_IOCTL_NETIF_COOKIE_T *)arg);
+            break;
+
+        case HAL_TAU_PKT_IOCTL_TYPE_GET_INTF:
+            ret = _hal_tau_pkt_getIntf(unit, (HAL_TAU_PKT_IOCTL_NETIF_COOKIE_T *)arg);
+            break;
+
+        case HAL_TAU_PKT_IOCTL_TYPE_CREATE_PROFILE:
+            ret = _hal_tau_pkt_createProfile(unit, (HAL_TAU_PKT_IOCTL_NETIF_COOKIE_T *)arg);
+            break;
+
+        case HAL_TAU_PKT_IOCTL_TYPE_DESTROY_PROFILE:
+            ret = _hal_tau_pkt_destroyProfile(unit, (HAL_TAU_PKT_IOCTL_NETIF_COOKIE_T *)arg);
+            break;
+
+        case HAL_TAU_PKT_IOCTL_TYPE_GET_PROFILE:
+            ret = _hal_tau_pkt_getProfile(unit, (HAL_TAU_PKT_IOCTL_NETIF_COOKIE_T *)arg);
+            break;
+
+        case HAL_TAU_PKT_IOCTL_TYPE_GET_INTF_CNT:
+            ret = _hal_tau_pkt_getIntfCnt(unit, (HAL_TAU_PKT_IOCTL_NETIF_COOKIE_T *)arg);
+            break;
+
+        case HAL_TAU_PKT_IOCTL_TYPE_CLEAR_INTF_CNT:
+            ret = _hal_tau_pkt_clearIntfCnt(unit, (HAL_TAU_PKT_IOCTL_NETIF_COOKIE_T *)arg);
+            break;
+
+        /* driver */
+        case HAL_TAU_PKT_IOCTL_TYPE_WAIT_RX_FREE:
+            ret = _hal_tau_pkt_schedRxDeQueue(unit, (HAL_TAU_PKT_IOCTL_RX_COOKIE_T *)arg);
+            break;
+
+        case HAL_TAU_PKT_IOCTL_TYPE_WAIT_TX_FREE:
+            ret = _hal_tau_pkt_strictTxDeQueue(unit, (HAL_TAU_PKT_IOCTL_TX_COOKIE_T *)arg);
+            break;
+
+        case HAL_TAU_PKT_IOCTL_TYPE_SET_RX_CFG:
+            ret = hal_tau_pkt_setRxKnlConfig(unit, (HAL_TAU_PKT_IOCTL_RX_COOKIE_T *)arg);
+            break;
+
+        case HAL_TAU_PKT_IOCTL_TYPE_GET_RX_CFG:
+            ret = hal_tau_pkt_getRxKnlConfig(unit, (HAL_TAU_PKT_IOCTL_RX_COOKIE_T *)arg);
+            break;
+
+        case HAL_TAU_PKT_IOCTL_TYPE_DEINIT_TASK:
+            ret = hal_tau_pkt_deinitTask(unit);
+            break;
+
+        case HAL_TAU_PKT_IOCTL_TYPE_DEINIT_DRV:
+            ret = hal_tau_pkt_deinitPktDrv(unit);
+            break;
+
+        case HAL_TAU_PKT_IOCTL_TYPE_INIT_TASK:
+            ret = hal_tau_pkt_initTask(unit);
+            break;
+
+        case HAL_TAU_PKT_IOCTL_TYPE_INIT_DRV:
+            ret = hal_tau_pkt_initPktDrv(unit);
+            break;
+
+        /* counter */
+        case HAL_TAU_PKT_IOCTL_TYPE_GET_TX_CNT:
+            ret = hal_tau_pkt_getTxKnlCnt(unit, (HAL_TAU_PKT_IOCTL_CH_CNT_COOKIE_T *)arg);
+            break;
+
+        case HAL_TAU_PKT_IOCTL_TYPE_GET_RX_CNT:
+            ret = hal_tau_pkt_getRxKnlCnt(unit, (HAL_TAU_PKT_IOCTL_CH_CNT_COOKIE_T *)arg);
+            break;
+
+        case HAL_TAU_PKT_IOCTL_TYPE_CLEAR_TX_CNT:
+            ret = hal_tau_pkt_clearTxKnlCnt(unit, (HAL_TAU_PKT_IOCTL_TX_COOKIE_T *)arg);
+            break;
+
+        case HAL_TAU_PKT_IOCTL_TYPE_CLEAR_RX_CNT:
+            ret = hal_tau_pkt_clearRxKnlCnt(unit, (HAL_TAU_PKT_IOCTL_RX_COOKIE_T *)arg);
+            break;
+
+        case HAL_TAU_PKT_IOCTL_TYPE_SET_PORT_ATTR:
+            ret = hal_tau_pkt_setPortAttr(unit, (HAL_TAU_PKT_IOCTL_PORT_COOKIE_T *)arg);
+            break;
+
+        default:
+            ret = -1;
+            break;
+    }
+
+    return (ret);
+}
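The dispatcher above reinterprets the 32-bit ioctl command itself as a HAL_TAU_PKT_IOCTL_CMD_T, so a userspace caller packs the target unit and the operation type into the command word and passes a cookie structure through `arg`. A minimal sketch of a counter query, assuming the union also exposes a raw 32-bit view next to `.field` (called `raw` here for illustration; the real member name may differ) and that the misc device has already been opened as `fd`:

    HAL_TAU_PKT_IOCTL_CMD_T cmd = {0};
    HAL_TAU_PKT_IOCTL_NETIF_COOKIE_T cookie = {0};

    cmd.field.unit = 0;                                   /* device unit */
    cmd.field.type = HAL_TAU_PKT_IOCTL_TYPE_GET_INTF_CNT; /* selects _hal_tau_pkt_getIntfCnt() */
    cookie.net_intf.id = intf_id;                         /* interface to query (hypothetical id) */

    ioctl(fd, cmd.raw, (unsigned long)&cookie);

Note that most handlers report their status through ptr_cookie->rc rather than the ioctl() return value, so callers should check both.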
+
+#ifdef CONFIG_COMPAT
+static long
+_hal_tau_pkt_dev_compat_ioctl(
+    struct file *filp,
+    unsigned int cmd,
+    unsigned long arg)
+{
+    return _hal_tau_pkt_dev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static struct file_operations _hal_tau_pkt_dev_ops =
+{
+    .owner          = THIS_MODULE,
+    .open           = _hal_tau_pkt_dev_open,
+    .release        = _hal_tau_pkt_dev_close,
+    .write          = _hal_tau_pkt_dev_tx,
+    .read           = _hal_tau_pkt_dev_rx,
+    .unlocked_ioctl = _hal_tau_pkt_dev_ioctl,
+#ifdef CONFIG_COMPAT
+    .compat_ioctl   = _hal_tau_pkt_dev_compat_ioctl,
+#endif
+};
+
+static struct miscdevice _hal_tau_pkt_dev =
+{
+    .minor = HAL_TAU_PKT_DRIVER_MINOR_NUM,
+    .name  = HAL_TAU_PKT_DRIVER_NAME,
+    .fops  = &_hal_tau_pkt_dev_ops,
+};
+
+/* ----------------------------------------------------------------------------------- Init/Deinit */
+static int __init
+_hal_tau_pkt_init(void)
+{
+    /* Register device */
+    misc_register(&_hal_tau_pkt_dev);
+
+    /* Init Thread */
+    osal_init();
+
+    /* Reset all databases */
+    osal_memset(_hal_tau_pkt_port_db, 0x0,
+                (HAL_TAU_PKT_MAX_PORT_NUM * sizeof(HAL_TAU_PKT_NETIF_PORT_DB_T)));
+    osal_memset(_hal_tau_pkt_rx_cb, 0x0,
+                NPS_CFG_MAXIMUM_CHIPS_PER_SYSTEM*sizeof(HAL_TAU_PKT_RX_CB_T));
+    osal_memset(_hal_tau_pkt_tx_cb, 0x0,
+                NPS_CFG_MAXIMUM_CHIPS_PER_SYSTEM*sizeof(HAL_TAU_PKT_TX_CB_T));
+    osal_memset(_hal_tau_pkt_drv_cb, 0x0,
+                NPS_CFG_MAXIMUM_CHIPS_PER_SYSTEM*sizeof(HAL_TAU_PKT_DRV_CB_T));
+
+    return (0);
+}
+
+static void __exit
+_hal_tau_pkt_exit(void)
+{
+    UI32_T unit = 0;
+
+    /* 1st. Stop Rx HW DMA and free all the DMA buffers hooked on the ring */
+    _hal_tau_pkt_rxStop(unit);
+
+    /* 2nd. Wait for the Rx done task to process all the available packets on the GPD ring */
+#define HAL_TAU_PKT_MODULE_EXIT_HOLD_TIME_US (1000000)
+    osal_sleepThread(HAL_TAU_PKT_MODULE_EXIT_HOLD_TIME_US);
+
+    /* 3rd. Stop all netdevs (if any) to prevent the kernel from Tx-ing new packets */
+    _hal_tau_pkt_stopAllIntf(unit);
+
+    /* 4th. Stop all the internal tasks (if any) */
+    hal_tau_pkt_deinitTask(unit);
+
+    /* 5th. Deinit pkt driver for common database/interrupt source (if required) */
+    hal_tau_pkt_deinitPktDrv(unit);
+
+    /* 6th. Clean up any intf/profiles that have not been destroyed */
+    _hal_tau_pkt_destroyAllProfile(unit);
+    _hal_tau_pkt_destroyAllIntf(unit);
+
+    osal_deinit();
+
+    /* Unregister device */
+    misc_deregister(&_hal_tau_pkt_dev);
+}
+
+module_init(_hal_tau_pkt_init);
+module_exit(_hal_tau_pkt_exit);
+
+module_param(dbg_flag, uint, S_IRUGO);
+MODULE_PARM_DESC(dbg_flag, "bit0:Error, bit1:Tx, bit2:Rx, bit3:Intf, bit4:Profile");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nephos");
+MODULE_DESCRIPTION("NETIF Kernel Module");
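Since dbg_flag is registered read-only (S_IRUGO), the debug bitmap cannot be changed through sysfs afterwards and has to be supplied at load time. For example, to enable error and profile tracing (bit0 | bit4), assuming the module file is nps_netif.ko as listed in the modules README:

    insmod nps_netif.ko dbg_flag=0x11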
diff --git a/platform/nephos/nephos-modules/modules/src/inc/aml.h b/platform/nephos/nephos-modules/modules/src/inc/aml.h
new file mode 100755
index 000000000000..658aa6e56f46
--- /dev/null
+++ b/platform/nephos/nephos-modules/modules/src/inc/aml.h
@@ -0,0 +1,366 @@
+/* Copyright (C) 2019 Nephos, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program.
+ */
+
+/* FILE NAME: aml.h
+ * PURPOSE:
+ *      1. Provide whole AML resource initialization API.
+ *      2. Provide configuration access APIs.
+ *      3. Provide ISR registration and deregistration APIs.
+ *      4. Provide memory access APIs.
+ *      5. Provide DMA management APIs.
+ *      6. Provide address translation APIs.
+ * NOTES:
+ */
+
+#ifndef AML_H
+#define AML_H
+
+
+/* INCLUDE FILE DECLARATIONS
+ */
+#include
+#include
+#include
+
+
+/* NAMING CONSTANT DECLARATIONS
+ */
+
+/* #define AML_EN_I2C */
+/* #define AML_EN_CUSTOM_DMA_ADDR */
+
+/* MACRO FUNCTION DECLARATIONS
+ */
+
+/* DATA TYPE DECLARATIONS
+ */
+typedef enum
+{
+    AML_DEV_TYPE_PCI,
+    AML_DEV_TYPE_I2C,
+    AML_DEV_TYPE_SPI,
+    AML_DEV_TYPE_LAST
+
+} AML_HW_IF_T;
+
+typedef NPS_ERROR_NO_T
+(*AML_DEV_READ_FUNC_T)(
+    const UI32_T unit,
+    const UI32_T addr_offset,
+    UI32_T *ptr_data,
+    const UI32_T len);
+
+typedef NPS_ERROR_NO_T
+(*AML_DEV_WRITE_FUNC_T)(
+    const UI32_T unit,
+    const UI32_T addr_offset,
+    const UI32_T *ptr_data,
+    const UI32_T len);
+
+typedef NPS_ERROR_NO_T
+(*AML_DEV_ISR_FUNC_T)(
+    void *ptr_data);
+
+/* To mask the chip interrupt in the kernel interrupt routine. */
+typedef struct
+{
+    UI32_T mask_addr;
+    UI32_T mask_val;
+
+} AML_DEV_ISR_DATA_T;
+
+/* To read or write the HW-intf registers. */
+typedef struct
+{
+    AML_DEV_READ_FUNC_T read_callback;
+    AML_DEV_WRITE_FUNC_T write_callback;
+
+} AML_DEV_ACCESS_T;
+
+typedef struct
+{
+    UI32_T vendor;
+    UI32_T device;
+    UI32_T revision;
+
+} AML_DEV_ID_T;
+
+
+typedef struct
+{
+    AML_HW_IF_T if_type;
+    AML_DEV_ID_T id;
+    AML_DEV_ACCESS_T access;
+
+} AML_DEV_T;
+
+/* EXPORTED SUBPROGRAM SPECIFICATIONS
+ */
+
+/* FUNCTION NAME: aml_getRunMode
+ * PURPOSE:
+ *      To get current SDK running mode.
+ * INPUT:
+ *      unit     -- the device unit
+ * OUTPUT:
+ *      ptr_mode -- current running mode
+ * RETURN:
+ *      NPS_E_OK -- Successfully get the running mode.
+ * NOTES:
+ *      none
+ */
+NPS_ERROR_NO_T
+aml_getRunMode(
+    const UI32_T unit,
+    UI32_T *ptr_mode);
+
+/* FUNCTION NAME: aml_deinit
+ * PURPOSE:
+ *      To deinitialize the DMA memory and the interface-related kernel
+ *      resources such as PCIe/I2C/SPI.
+ * INPUT:
+ *      none
+ * OUTPUT:
+ *      none
+ * RETURN:
+ *      NPS_E_OK     -- Successfully deinitialize the AML module.
+ *      NPS_E_OTHERS -- Failed to deinitialize the AML module.
+ * NOTES:
+ *      none
+ */
+NPS_ERROR_NO_T
+aml_deinit(void);
+
+/* FUNCTION NAME: aml_init
+ * PURPOSE:
+ *      To initialize the DMA memory and the interface-related kernel
+ *      resources such as PCIe/I2C/SPI.
+ * INPUT:
+ *      none
+ * OUTPUT:
+ *      none
+ * RETURN:
+ *      NPS_E_OK     -- Successfully initialize the AML module.
+ *      NPS_E_OTHERS -- Failed to initialize the AML module.
+ * NOTES:
+ *      none
+ */
+NPS_ERROR_NO_T
+aml_init(void);
+
+/* FUNCTION NAME: aml_getNumberOfChip
+ * PURPOSE:
+ *      To get the number of chips connected to the host CPU.
+ * INPUT:
+ *      none
+ * OUTPUT:
+ *      ptr_num -- pointer for the chip number
+ * RETURN:
+ *      NPS_E_OK -- Successfully get the number of chips.
+ * NOTES:
+ *      none
+ */
+NPS_ERROR_NO_T
+aml_getNumberOfChip(
+    UI32_T *ptr_num);
+
+/* FUNCTION NAME: aml_connectIsr
+ * PURPOSE:
+ *      To enable the system interrupt and specify the ISR handler.
+ * INPUT:
+ *      unit       -- the device unit
+ *      handler    -- the ISR handler
+ *      ptr_cookie -- pointer for the data as an argument of the handler
+ * OUTPUT:
+ *      none
+ * RETURN:
+ *      NPS_E_OK     -- Successfully connect the ISR handler to the system.
+ *      NPS_E_OTHERS -- Failed to connect the ISR handler to the system.
+ * NOTES:
+ *      none
+ */
+NPS_ERROR_NO_T
+aml_connectIsr(
+    const UI32_T unit,
+    AML_DEV_ISR_FUNC_T handler,
+    AML_DEV_ISR_DATA_T *ptr_cookie);
+
+/* FUNCTION NAME: aml_disconnectIsr
+ * PURPOSE:
+ *      To disable the system interrupt notification.
+ * INPUT:
+ *      unit -- the device unit
+ * OUTPUT:
+ *      none
+ * RETURN:
+ *      NPS_E_OK     -- Successfully disconnect the ISR handler from the system.
+ *      NPS_E_OTHERS -- Failed to disconnect the ISR handler from the system.
+ * NOTES:
+ *      none
+ */
+NPS_ERROR_NO_T
+aml_disconnectIsr(
+    const UI32_T unit);
+
+/* FUNCTION NAME: aml_getDeviceId
+ * PURPOSE:
+ *      To get the vendor/device/revision ID of the specified chip unit.
+ * INPUT:
+ *      unit -- the device unit
+ * OUTPUT:
+ *      ptr_vendor_id   -- pointer for the vendor ID
+ *      ptr_device_id   -- pointer for the device ID
+ *      ptr_revision_id -- pointer for the revision ID
+ * RETURN:
+ *      NPS_E_OK     -- Successfully get the IDs.
+ *      NPS_E_OTHERS -- Failed to get the IDs.
+ * NOTES:
+ *      none
+ */
+NPS_ERROR_NO_T
+aml_getDeviceId(
+    const UI32_T unit,
+    UI32_T *ptr_vendor_id,
+    UI32_T *ptr_device_id,
+    UI32_T *ptr_revision_id);
+
+/* FUNCTION NAME: aml_readReg
+ * PURPOSE:
+ *      To read data from the register of the specified chip unit.
+ * INPUT:
+ *      unit        -- the device unit
+ *      addr_offset -- the address of the register
+ *      len         -- the data size to read
+ * OUTPUT:
+ *      ptr_data    -- pointer for the register data
+ * RETURN:
+ *      NPS_E_OK     -- Successfully read the data.
+ *      NPS_E_OTHERS -- Failed to read the data.
+ * NOTES:
+ *      none
+ */
+NPS_ERROR_NO_T
+aml_readReg(
+    const UI32_T unit,
+    const UI32_T addr_offset,
+    UI32_T *ptr_data,
+    const UI32_T len);
+
+/* FUNCTION NAME: aml_writeReg
+ * PURPOSE:
+ *      To write data to the register of the specified chip unit.
+ * INPUT:
+ *      unit        -- the device unit
+ *      addr_offset -- the address of the register
+ *      ptr_data    -- pointer for the written data
+ *      len         -- the data size to write
+ * OUTPUT:
+ *      none
+ * RETURN:
+ *      NPS_E_OK     -- Successfully write the data.
+ *      NPS_E_OTHERS -- Failed to write the data.
+ * NOTES:
+ *      none
+ */
+NPS_ERROR_NO_T
+aml_writeReg(
+    const UI32_T unit,
+    const UI32_T addr_offset,
+    const UI32_T *ptr_data,
+    const UI32_T len);
+
+/* FUNCTION NAME: aml_convertVirtToPhy
+ * PURPOSE:
+ *      To get the physical address of the corresponding virtual
+ *      address input.
+ * INPUT:
+ *      ptr_virt_addr -- pointer to the virtual address
+ * OUTPUT:
+ *      ptr_phy_addr  -- pointer to the physical address
+ * RETURN:
+ *      NPS_E_OK     -- Successfully convert the address.
+ *      NPS_E_OTHERS -- Failed to convert the address.
+ *                      The memory might not be allocated by AML.
+ * NOTES:
+ *      none
+ */
+NPS_ERROR_NO_T
+aml_convertVirtToPhy(
+    void *ptr_virt_addr,
+    NPS_ADDR_T *ptr_phy_addr);
+
+/* FUNCTION NAME: aml_convertPhyToVirt
+ * PURPOSE:
+ *      To get the virtual address of the corresponding physical
+ *      address input.
+ * INPUT:
+ *      phy_addr       -- the physical address
+ * OUTPUT:
+ *      pptr_virt_addr -- pointer for the virtual address pointer
+ * RETURN:
+ *      NPS_E_OK     -- Successfully convert the address.
+ *      NPS_E_OTHERS -- Failed to convert the address.
+ *                      The memory might not be allocated by AML.
+ * NOTES:
+ *      none
+ */
+NPS_ERROR_NO_T
+aml_convertPhyToVirt(
+    const NPS_ADDR_T phy_addr,
+    void **pptr_virt_addr);
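Together with the cache-maintenance helpers declared just below, the address-translation calls implement the usual DMA handshake: flush after the CPU writes a buffer the device will read, invalidate before the CPU reads a buffer the device wrote. A minimal sketch, assuming ptr_buf points to DMA memory obtained from the AML allocator and len is its size (both hypothetical):

    NPS_ADDR_T phy_addr;

    aml_convertVirtToPhy(ptr_buf, &phy_addr); /* program phy_addr into the GPD */
    aml_flushCache(ptr_buf, len);             /* CPU cache -> memory, before HW reads */
    /* ... hardware DMA runs ... */
    aml_invalidateCache(ptr_buf, len);        /* memory -> CPU cache, before SW parses */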
+/* FUNCTION NAME: aml_flushCache
+ * PURPOSE:
+ *      To update the data from the CPU cache to the physical memory.
+ * INPUT:
+ *      ptr_virt_addr -- pointer for the data
+ *      size          -- target data size to be updated
+ * OUTPUT:
+ *      none
+ * RETURN:
+ *      NPS_E_OK     -- Successfully update the data from the CPU cache
+ *                      to the physical memory.
+ *      NPS_E_OTHERS -- Failed to update the data from the CPU cache
+ *                      to the physical memory.
+ * NOTES:
+ *      none
+ */
+NPS_ERROR_NO_T
+aml_flushCache(
+    void *ptr_virt_addr,
+    const UI32_T size);
+
+/* FUNCTION NAME: aml_invalidateCache
+ * PURPOSE:
+ *      To update the data from the physical memory to the CPU cache.
+ * INPUT:
+ *      ptr_virt_addr -- pointer for the data
+ *      size          -- target data size to be updated
+ * OUTPUT:
+ *      none
+ * RETURN:
+ *      NPS_E_OK     -- Successfully update the data from the physical memory
+ *                      to the CPU cache.
+ *      NPS_E_OTHERS -- Failed to update the data from the physical memory
+ *                      to the CPU cache.
+ * NOTES:
+ *      none
+ */
+NPS_ERROR_NO_T
+aml_invalidateCache(
+    void *ptr_virt_addr,
+    const UI32_T size);
+
+#endif /* #ifndef AML_H */
diff --git a/platform/nephos/nephos-modules/modules/src/inc/hal_dev.h b/platform/nephos/nephos-modules/modules/src/inc/hal_dev.h
new file mode 100755
index 000000000000..edd582adc197
--- /dev/null
+++ b/platform/nephos/nephos-modules/modules/src/inc/hal_dev.h
@@ -0,0 +1,48 @@
+/* Copyright (C) 2019 Nephos, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program.
+ */
+
+/* FILE NAME: hal_dev.h
+ * PURPOSE:
+ *      Provide a list of device IDs.
+ *
+ * NOTES:
+ */
+
+#ifndef HAL_DEV_H
+#define HAL_DEV_H
+
+/* INCLUDE FILE DECLARATIONS
+ */
+/* NAMING CONSTANT DECLARATIONS
+ */
+#define HAL_MTK_VENDOR_ID (0x0E8D)
+#define HAL_NP_VENDOR_ID (0x1D9F)
+
+#define HAL_DEVICE_ID_MT3257 (0x3257)
+#define HAL_DEVICE_ID_MT3258 (0x3258)
+
+#define HAL_DEVICE_ID_NP8363 (0x8363) /* 1.08T 1Bin */
+#define HAL_DEVICE_ID_NP8365 (0x8365) /* 1.8T 1Bin */
+#define HAL_DEVICE_ID_NP8366 (0x8366) /* 2.4T 1Bin */
+#define HAL_DEVICE_ID_NP8367 (0x8367) /* 3.2T 1Bin */
+#define HAL_DEVICE_ID_NP8368 (0x8368) /* 3.2T 2Bin */
+#define HAL_DEVICE_ID_NP8369 (0x8369) /* 6.4T 2Bin */
+
+#define HAL_REVISION_ID_E1 (0x01)
+#define HAL_REVISION_ID_E2 (0x02)
+
+#define HAL_INVALID_DEVICE_ID (0xFFFFFFFF)
+
+#endif /* #ifndef HAL_DEV_H */
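hal_dev.h only enumerates the IDs; a probe path would typically match them against what aml_getDeviceId() reports. A hedged sketch of such a check:

    UI32_T vendor_id, device_id, revision_id;

    if (NPS_E_OK == aml_getDeviceId(0 /* unit */, &vendor_id, &device_id, &revision_id))
    {
        if ((HAL_NP_VENDOR_ID == vendor_id) && (HAL_DEVICE_ID_NP8365 == device_id))
        {
            /* 1.8T 1Bin part; revision_id should be HAL_REVISION_ID_E1 or _E2 */
        }
    }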
diff --git a/platform/nephos/nephos-modules/modules/src/inc/hal_tau_pkt_knl.h b/platform/nephos/nephos-modules/modules/src/inc/hal_tau_pkt_knl.h
new file mode 100755
index 000000000000..96a8cf6441f0
--- /dev/null
+++ b/platform/nephos/nephos-modules/modules/src/inc/hal_tau_pkt_knl.h
@@ -0,0 +1,2302 @@
+/* Copyright (C) 2019 Nephos, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program.
+ */
+
+/* FILE NAME: hal_tau_pkt_knl.h
+ * PURPOSE:
+ *      To provide Linux kernel support for PDMA TX/RX control.
+ *
+ * NOTES:
+ */
+
+#ifndef HAL_TAU_PKT_KNL_H
+#define HAL_TAU_PKT_KNL_H
+
+
+/* CP_COMMON */
+#define HAL_TAU_PKT_CP_COMMON_INT_EN_HI (0x012C0000)
+#define HAL_TAU_PKT_CP_COMMON_INT_LO_HI (0x012C0004)
+#define HAL_TAU_PKT_CP_COMMON_INT_LVL_HI (0x012C0008)
+#define HAL_TAU_PKT_CP_COMMON_INT_LVL_LO (0x012C000C)
+#define HAL_TAU_PKT_CP_COMMON_INT_MASK_SET_HI (0x012C0010)
+#define HAL_TAU_PKT_CP_COMMON_INT_MASK_SET_LO (0x012C0014)
+#define HAL_TAU_PKT_CP_COMMON_INT_MASK_CLR_HI (0x012C0018)
+#define HAL_TAU_PKT_CP_COMMON_INT_MASK_CLR_LO (0x012C001C)
+#define HAL_TAU_PKT_CP_COMMON_INT_MASK_VAL_HI (0x012C0020)
+#define HAL_TAU_PKT_CP_COMMON_INT_MASK_VAL_LO (0x012C0024)
+#define HAL_TAU_PKT_CP_COMMON_INT_STAT_HI (0x012C0028)
+#define HAL_TAU_PKT_CP_COMMON_INT_STAT_LO (0x012C002C)
+#define HAL_TAU_PKT_CP_COMMON_INT_CLR_HI (0x012C0030)
+#define HAL_TAU_PKT_CP_COMMON_INT_CLR_LO (0x012C0034)
+#define HAL_TAU_PKT_CP_COMMON_INT_SET_HI (0x012C0038)
+#define HAL_TAU_PKT_CP_COMMON_INT_SET_LO (0x012C003C)
+
+/* PDMA */
+#define HAL_TAU_PKT_PDMA_ERR_INT_STAT (0x013F1000)
+#define HAL_TAU_PKT_PDMA_ERR_INT_CLR (0x013F1004)
+#define HAL_TAU_PKT_PDMA_ERR_INT_EN (0x013F1010)
+#define HAL_TAU_PKT_PDMA_ERR_INT_LVL (0x013F1014)
+#define HAL_TAU_PKT_PDMA_ERR_INT_MASK_SET (0x013F1018)
+#define HAL_TAU_PKT_PDMA_ERR_INT_MASK_CLR (0x013F101C)
+#define HAL_TAU_PKT_PDMA_ERR_INT_MASK_VAL (0x013F1020)
+#define HAL_TAU_PKT_PDMA_ERR_INT_SET (0x013F1024)
+#define HAL_TAU_PKT_PDMA_CREDIT_CFG (0x013F1100)
+
+/* Rx */
+#define HAL_TAU_PKT_PDMA_RCH_GPD_RING_START_ADDR_LO (0x013F12E4)
+#define HAL_TAU_PKT_PDMA_RCH_GPD_RING_START_ADDR_HI (0x013F12E8)
+#define HAL_TAU_PKT_PDMA_RCH_GPD_RING_SIZE (0x013F12EC)
+#define HAL_TAU_PKT_PDMA_RCH_CMD (0x013F1300)
+#define HAL_TAU_PKT_PDMA_RCH_INT_EN (0x013F1360)
+#define HAL_TAU_PKT_PDMA_RCH_INT_LVL (0x013F1364)
+#define HAL_TAU_PKT_PDMA_RCH_INT_MASK (0x013F1368)
+#define HAL_TAU_PKT_PDMA_RCH_INT_SET (0x013F1370)
+#define HAL_TAU_PKT_PDMA_RCH_INT_CLR (0x013F1374)
+#define HAL_TAU_PKT_PDMA_RCH_INT_STAT (0x013F1378)
+
+/* Tx */
+#define HAL_TAU_PKT_PDMA_TCH_GPD_RING_START_ADDR_LO (0x013F1A00)
+#define HAL_TAU_PKT_PDMA_TCH_GPD_RING_START_ADDR_HI (0x013F1A04)
+#define HAL_TAU_PKT_PDMA_TCH_GPD_RING_SIZE (0x013F1A08)
+#define HAL_TAU_PKT_PDMA_TCH_CMD (0x013F1A20)
+#define HAL_TAU_PKT_PDMA_TCH_INT_EN (0x013F1A40)
+#define HAL_TAU_PKT_PDMA_TCH_INT_LVL (0x013F1A44)
+#define HAL_TAU_PKT_PDMA_TCH_INT_MASK (0x013F1A48)
+#define HAL_TAU_PKT_PDMA_TCH_INT_SET (0x013F1A50)
+#define HAL_TAU_PKT_PDMA_TCH_INT_CLR (0x013F1A54)
+#define HAL_TAU_PKT_PDMA_TCH_INT_STAT (0x013F1A58)
+
+#define HAL_TAU_PKT_GET_MMIO(__tbl__) (0x00FFFFFF & (__tbl__))
+#define HAL_TAU_PKT_GET_PDMA_RCH_REG(__tbl__, __channel__) ((__tbl__) + (0x200 * (__channel__)))
+#define HAL_TAU_PKT_GET_PDMA_TCH_REG(__tbl__, __channel__) ((__tbl__) + (0x100 * (__channel__)))
+
+
+
+#define NPS_NETIF_NAME_LEN (16)
+#define NPS_NETIF_PROFILE_NUM_MAX (256)
+#define NPS_NETIF_PROFILE_PATTERN_NUM (4)
+#define NPS_NETIF_PROFILE_PATTERN_LEN (8)
+
+/* nps_port.h */
+typedef enum
+{
+    NPS_PORT_SPEED_1G = 1000,
+    NPS_PORT_SPEED_10G = 10000,
+    NPS_PORT_SPEED_25G = 25000,
+    NPS_PORT_SPEED_40G = 40000,
+    NPS_PORT_SPEED_50G = 50000,
+    NPS_PORT_SPEED_100G = 100000,
+    NPS_PORT_SPEED_200G = 200000,
+    NPS_PORT_SPEED_400G = 400000,
+    NPS_PORT_SPEED_LAST
+} NPS_PORT_SPEED_T; + + +/* hal_tau_const.h */ +#define HAL_TAU_PORT_NUM (128) +#define HAL_TAU_EXCPT_CPU_NUM (256) +#define HAL_TAU_INVALID_NVO3_ENCAP_IDX (0x3FFF) +#define HAL_TAU_INVALID_NVO3_ADJ_IDX (0xFF) +#define HAL_TAU_EXCPT_CPU_BASE_ID (28 * 1024) +#define HAL_TAU_EXCPT_CPU_NON_L3_MIN (0) +#define HAL_TAU_EXCPT_CPU_NON_L3_MAX (HAL_TAU_EXCPT_CPU_NON_L3_MIN + HAL_TAU_EXCPT_CPU_NUM - 1) +#define HAL_TAU_EXCPT_CPU_L3_MIN (HAL_TAU_EXCPT_CPU_NON_L3_MIN + HAL_TAU_EXCPT_CPU_NUM) +#define HAL_TAU_EXCPT_CPU_L3_MAX (HAL_TAU_EXCPT_CPU_L3_MIN + HAL_TAU_EXCPT_CPU_NUM - 1) + +/* hal_tau_pkt_rsrc.h */ +#define HAL_TAU_PKT_IPP_EXCPT_LAST (256) +#define HAL_TAU_PKT_EPP_EXCPT_LAST (64) + +#define HAL_TAU_PKT_IPP_EXCPT_IEV_SDK_REDIRECT_TO_CPU_L2UC \ + (192 + IEV_CFG_EXCPT_EN_W1_SDK_REDIRECT_TO_CPU_L2UC_FIELD_ID ) +#define HAL_TAU_PKT_IPP_EXCPT_IEV_SDK_REDIRECT_TO_CPU_L3UC \ + (192 + IEV_CFG_EXCPT_EN_W1_SDK_REDIRECT_TO_CPU_L3UC_FIELD_ID ) +#define HAL_TAU_PKT_IPP_EXCPT_IEV_SDK_L3UC_DA_MISS \ + (192 + IEV_CFG_EXCPT_EN_W1_SDK_L3UC_DA_MISS_FIELD_ID ) +#define HAL_TAU_PKT_IPP_EXCPT_IEV_SDK_L3MC_PIM_REGISTER \ + (192 + IEV_CFG_EXCPT_EN_W1_SDK_L3MC_PIM_REGISTER_FIELD_ID ) +#define HAL_TAU_PKT_IPP_EXCPT_IEV_SDK_FLEX_DECAP_0_REASON_0 \ + (224 + IEV_CFG_EXCPT_EN_W0_SDK_FLEX_DECAP_0_REASON_0_FIELD_ID) + +/* offset of ITM_RSLT_CPU_CP */ +#define HAL_TAU_PKT_IPP_COPY2CPU_OFFSET (16) +#define HAL_TAU_PKT_EPP_COPY2CPU_OFFSET (32) + +typedef UI32_T HAL_TAU_PKT_IPP_EXCPT_T; +typedef UI32_T HAL_TAU_PKT_EPP_EXCPT_T; + +typedef enum +{ + HAL_TAU_PKT_IPP_L3_EXCPT_FCOE_ZONING = 0, + HAL_TAU_PKT_IPP_L3_EXCPT_RPF, + HAL_TAU_PKT_IPP_L3_EXCPT_ICMP_REDIR, + HAL_TAU_PKT_IPP_L3_EXCPT_SW_FWD, + HAL_TAU_PKT_IPP_L3_EXCPT_MTU, + HAL_TAU_PKT_IPP_L3_EXCPT_TTL, + HAL_TAU_PKT_IPP_L3_EXCPT_LAST +} HAL_TAU_PKT_IPP_L3_EXCPT_T; + +typedef enum +{ + HAL_TAU_PKT_IPP_RSN_RSVD_0 = 0, + HAL_TAU_PKT_IPP_RSN_IDS_MPLS_MP_LSP_INNER_IP_LCL, + HAL_TAU_PKT_IPP_RSN_IDS_IP_MC_TNL_LCL_INTF_MISS, + HAL_TAU_PKT_IPP_RSN_IDS_IP_MC_TNL_GRE_ISIS, + HAL_TAU_PKT_IPP_RSN_IDS_IP_MC_TNL_GRE_KA, + HAL_TAU_PKT_IPP_RSN_IDS_IP_MC_TNL_INNER_IP_LCL, + HAL_TAU_PKT_IPP_RSN_IDS_TRILL_MC_LCL_INTF_MISS, + HAL_TAU_PKT_IPP_RSN_IDS_INNER_SRV_1ST_MISS, + HAL_TAU_PKT_IPP_RSN_IDS_INNER_SRV_2ND_MISS, + HAL_TAU_PKT_IPP_RSN_IEV_IP_MC_TTL0, + HAL_TAU_PKT_IPP_RSN_IEV_IP_MC_TTL1, + HAL_TAU_PKT_IPP_RSN_RSVD_1, + HAL_TAU_PKT_IPP_RSN_RSVD_2, + HAL_TAU_PKT_IPP_RSN_IDS_ECN, + HAL_TAU_PKT_IPP_RSN_IEV_ICMP_REDIR, + HAL_TAU_PKT_IPP_RSN_IEV_ICMP_REDIR_WITH_RSN_IDS_ECN, + HAL_TAU_PKT_IPP_RSN_LAST +} HAL_TAU_PKT_IPP_RSN_T; + +typedef enum +{ + /* IEV.cp_to_cpu_bmap |= + * (1 << sw_plane.iev_cp_to_cpu_bit_pos_{i}) | + * (1 << sw_plane.iev_sflw_cp_to_cpu_bidx_{i}) | + * (1 << (IEV_RSLT_CTL2CPU_PROF.code - 1)) | + * (1 << (ICIA_RSLT_TCAM_UCP_POLICY.cp_to_cpu_idx - 1)); + */ + HAL_TAU_PKT_IPP_COPY2CPU_ICIA_0 = 0, + HAL_TAU_PKT_IPP_COPY2CPU_ICIA_1, + HAL_TAU_PKT_IPP_COPY2CPU_ICIA_2, + HAL_TAU_PKT_IPP_COPY2CPU_ICIA_3, + HAL_TAU_PKT_IPP_COPY2CPU_ICIA_4, + HAL_TAU_PKT_IPP_COPY2CPU_ICIA_5, + HAL_TAU_PKT_IPP_COPY2CPU_COPY_TO_CPU_L2UC, + HAL_TAU_PKT_IPP_COPY2CPU_COPY_TO_CPU_L3UC, + HAL_TAU_PKT_IPP_COPY2CPU_PORT_SFLOW, + HAL_TAU_PKT_IPP_COPY2CPU_ICIA_SFLOW, + HAL_TAU_PKT_IPP_COPY2CPU_FLOW_SFLOW, + HAL_TAU_PKT_IPP_COPY2CPU_L3MC_SPT_READY_UNSET, + HAL_TAU_PKT_IPP_COPY2CPU_COPY_TO_CPU_L2MC, + HAL_TAU_PKT_IPP_COPY2CPU_COPY_TO_CPU_L3MC, + HAL_TAU_PKT_IPP_COPY2CPU_USR_DEFINE_0, + HAL_TAU_PKT_IPP_COPY2CPU_USR_DEFINE_1, + HAL_TAU_PKT_IPP_COPY2CPU_LAST +} HAL_TAU_PKT_IPP_COPY2CPU_T; + +typedef enum +{ + /* The value 
of: + * 1. emi_sflw_cp_to_cpu_idx_bidx_* + * 2. ECIA.cp_to_cpu_idx (last is invalid) + */ + HAL_TAU_PKT_EPP_COPY2CPU_ECIA_0 = 0, + HAL_TAU_PKT_EPP_COPY2CPU_ECIA_1, + HAL_TAU_PKT_EPP_COPY2CPU_ECIA_2, + HAL_TAU_PKT_EPP_COPY2CPU_ECIA_3, + HAL_TAU_PKT_EPP_COPY2CPU_PORT_SFLOW, + HAL_TAU_PKT_EPP_COPY2CPU_ECIA_SFLOW, + HAL_TAU_PKT_EPP_COPY2CPU_RSVD_0, + HAL_TAU_PKT_EPP_COPY2CPU_RSVD_1, + HAL_TAU_PKT_EPP_COPY2CPU_LAST +} HAL_TAU_PKT_EPP_COPY2CPU_T; + +#define HAL_TAU_PKT_IPP_EXCPT_BITMAP_SIZE (NPS_BITMAP_SIZE(HAL_TAU_PKT_IPP_EXCPT_LAST)) +#define HAL_TAU_PKT_IPP_L3_EXCPT_BITMAP_SIZE (NPS_BITMAP_SIZE(HAL_TAU_PKT_IPP_L3_EXCPT_LAST)) +#define HAL_TAU_PKT_EPP_EXCPT_BITMAP_SIZE (NPS_BITMAP_SIZE(HAL_TAU_PKT_EPP_EXCPT_LAST)) +#define HAL_TAU_PKT_IPP_RSN_BITMAP_SIZE (NPS_BITMAP_SIZE(HAL_TAU_PKT_IPP_RSN_LAST)) +#define HAL_TAU_PKT_IPP_COPY2CPU_BITMAP_SIZE (NPS_BITMAP_SIZE(HAL_TAU_PKT_IPP_COPY2CPU_LAST)) +#define HAL_TAU_PKT_EPP_COPY2CPU_BITMAP_SIZE (NPS_BITMAP_SIZE(HAL_TAU_PKT_EPP_COPY2CPU_LAST)) + +typedef UI32_T HAL_TAU_PKT_IPP_EXCPT_BITMAP_T[HAL_TAU_PKT_IPP_EXCPT_BITMAP_SIZE]; +typedef UI32_T HAL_TAU_PKT_IPP_L3_EXCPT_BITMAP_T[HAL_TAU_PKT_IPP_L3_EXCPT_BITMAP_SIZE]; +typedef UI32_T HAL_TAU_PKT_EPP_EXCPT_BITMAP_T[HAL_TAU_PKT_EPP_EXCPT_BITMAP_SIZE]; +typedef UI32_T HAL_TAU_PKT_IPP_RSN_BITMAP_T[HAL_TAU_PKT_IPP_RSN_BITMAP_SIZE]; +typedef UI32_T HAL_TAU_PKT_IPP_COPY2CPU_BITMAP_T[HAL_TAU_PKT_IPP_COPY2CPU_BITMAP_SIZE]; +typedef UI32_T HAL_TAU_PKT_EPP_COPY2CPU_BITMAP_T[HAL_TAU_PKT_EPP_COPY2CPU_BITMAP_SIZE]; + +typedef struct +{ + /* excpt */ + HAL_TAU_PKT_IPP_EXCPT_BITMAP_T ipp_excpt_bitmap; + HAL_TAU_PKT_IPP_L3_EXCPT_BITMAP_T ipp_l3_excpt_bitmap; + HAL_TAU_PKT_EPP_EXCPT_BITMAP_T epp_excpt_bitmap; + + /* cp */ + HAL_TAU_PKT_IPP_RSN_BITMAP_T ipp_rsn_bitmap; + HAL_TAU_PKT_IPP_COPY2CPU_BITMAP_T ipp_copy2cpu_bitmap; + HAL_TAU_PKT_EPP_COPY2CPU_BITMAP_T epp_copy2cpu_bitmap; + +} HAL_TAU_PKT_RX_REASON_BITMAP_T; + + +/* hal_tau_pkt.h */ + +/* NAMING DECLARATIONS + */ +/* PKT related configurable parameters */ +#define HAL_TAU_PKT_RX_FREE_STACK_SIZE (64 * 1024) +#define HAL_TAU_PKT_RX_FREE_THREAD_PRI (80) + +#define HAL_TAU_PKT_RX_ISR_STACK_SIZE (64 * 1024) +#define HAL_TAU_PKT_RX_ISR_THREAD_PRI (80) + +#define HAL_TAU_PKT_TX_FREE_STACK_SIZE (64 * 1024) +#define HAL_TAU_PKT_TX_FREE_THREAD_PRI (80) + +#define HAL_TAU_PKT_TX_ISR_STACK_SIZE (64 * 1024) +#define HAL_TAU_PKT_TX_ISR_THREAD_PRI (80) + +#define HAL_TAU_PKT_TX_NET_STACK_SIZE (64 * 1024) +#define HAL_TAU_PKT_TX_NET_THREAD_PRI (80) + +#define HAL_TAU_PKT_ERROR_ISR_STACK_SIZE (64 * 1024) +#define HAL_TAU_PKT_ERROR_ISR_THREAD_PRI (80) + +/* PKT definitions */ +#define HAL_TAU_PKT_TX_MAX_LEN (9216) +#define HAL_TAU_PKT_RX_MAX_LEN (9216 + 86) /* EPP tunnel header */ +#define HAL_TAU_PKT_MIN_LEN (64) /* Ethernet definition */ +#define HAL_TAU_PKT_TMH_HDR_SZ (20) +#define HAL_TAU_PKT_PPH_HDR_SZ (20) +#define HAL_TAU_PKT_CRC_LEN (4) + +/* CH */ +#define HAL_TAU_PKT_CH_LAST_GPD (0) +#define HAL_TAU_PKT_CH_MIDDLE_GPD (1) + +/* PRG */ +#define HAL_TAU_PKT_PRG_PROCESS_GPD (0) /* Normal */ +#define HAL_TAU_PKT_PRG_SKIP_GPD (1) /* Skip */ + +/* CRCC */ +#define HAL_TAU_PKT_CRCC_SUM_BY_HW (0) /* calculated by HW */ +#define HAL_TAU_PKT_CRCC_SUM_BY_SW (1) /* calculated by SW */ + +/* IOC */ +#define HAL_TAU_PKT_IOC_NO_INTR (0) /* trigger interrupt each GPD */ +#define HAL_TAU_PKT_IOC_HAS_INTR (1) /* trigger interrupt when ch=0, default setting */ + +/* HWO */ +#define HAL_TAU_PKT_HWO_SW_OWN (0) +#define HAL_TAU_PKT_HWO_HW_OWN (1) + +/* ECC */ +#define HAL_TAU_PKT_ECC_ERROR_OCCUR 
(1)
+
+/* CPU queue number */
+#define HAL_TAU_PKT_CPU_QUE_NUM (48)
+
+/* PDMA Definitions */
+#define HAL_TAU_PKT_PDMA_MAX_GPD_PER_PKT (10)        /* <= 256 */
+#define HAL_TAU_PKT_PDMA_TX_GPD_NUM (1024)           /* <= 65535 */
+#define HAL_TAU_PKT_PDMA_RX_GPD_NUM (1024)           /* <= 65535 */
+#define HAL_TAU_PKT_PDMA_TX_INTR_TIMEOUT (10 * 1000) /* us */
+#define HAL_TAU_PKT_PDMA_TX_POLL_MAX_LOOP (10 * 1000) /* iterations */
+
+/* Mode */
+#define HAL_TAU_PKT_TX_WAIT_MODE (HAL_TAU_PKT_TX_WAIT_ASYNC)
+#define HAL_TAU_PKT_RX_SCHED_MODE (HAL_TAU_PKT_RX_SCHED_RR)
+
+/* TX Queue */
+#define HAL_TAU_PKT_TX_QUEUE_LEN (HAL_TAU_PKT_PDMA_TX_GPD_NUM * 10)
+#define HAL_TAU_PKT_TX_TASK_MAX_LOOP (HAL_TAU_PKT_TX_QUEUE_LEN)
+
+/* RX Queue */
+#define HAL_TAU_PKT_RX_QUEUE_NUM (HAL_TAU_PKT_RX_CHANNEL_LAST)
+#define HAL_TAU_PKT_RX_QUEUE_WEIGHT (10)
+#define HAL_TAU_PKT_RX_QUEUE_LEN (HAL_TAU_PKT_PDMA_RX_GPD_NUM * 10)
+#define HAL_TAU_PKT_RX_TASK_MAX_LOOP (HAL_TAU_PKT_RX_QUEUE_LEN)
+
+/* MACRO FUNCTION DECLARATIONS
+ */
+/*---------------------------------------------------------------------------*/
+/* [Taurus] Alignment to 64-bytes */
+#if defined(NPS_EN_HOST_64_BIT_BIG_ENDIAN) || defined(NPS_EN_HOST_64_BIT_LITTLE_ENDIAN)
+#define HAL_TAU_PKT_PDMA_ALIGN_ADDR(pdma_addr, align_sz) (((pdma_addr) + (align_sz)) & 0xFFFFFFFFFFFFFFC0)
+#else
+#define HAL_TAU_PKT_PDMA_ALIGN_ADDR(pdma_addr, align_sz) (((pdma_addr) + (align_sz)) & 0xFFFFFFC0)
+#endif
+/*---------------------------------------------------------------------------*/
+#if defined(NPS_EN_BIG_ENDIAN)
+#define HAL_TAU_PKT_ENDIAN_SWAP32(val) (val)
+#else
+#define HAL_TAU_PKT_ENDIAN_SWAP32(val) ((((UI32_T)(val) & 0xFF) << 24) | \
+                                        (((UI32_T)(val) & 0xFF00) << 8) | \
+                                        (((UI32_T)(val) & 0xFF0000) >> 8) | \
+                                        (((UI32_T)(val) & 0xFF000000) >> 24))
+#endif
+/*---------------------------------------------------------------------------*/
+#define HAL_TAU_PKT_GET_BIT(flags, bit) ((((flags) & (bit)) > 0)? 1 : 0)
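Two of these macros are easy to misread, so a worked example may help. HAL_TAU_PKT_PDMA_ALIGN_ADDR adds align_sz before masking, so it always advances to the next 64-byte boundary rather than leaving an already-aligned address untouched, and HAL_TAU_PKT_ENDIAN_SWAP32 is a byte reversal on little-endian hosts and a no-op on big-endian ones:

    /* (0x1234 + 64) & 0xFFFFFFC0 = 0x1240; an already-aligned 0x1240 would move on to 0x1280 */
    addr = HAL_TAU_PKT_PDMA_ALIGN_ADDR(0x1234, 64);

    /* little-endian host: 0x0A0B0C0D -> 0x0D0C0B0A */
    val = HAL_TAU_PKT_ENDIAN_SWAP32(0x0A0B0C0D);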
+/*---------------------------------------------------------------------------*/
+#define HAL_TAU_PKT_SET_BITMAP(bitmap, mask_bitmap) (bitmap = ((bitmap) | (mask_bitmap)))
+#define HAL_TAU_PKT_CLR_BITMAP(bitmap, mask_bitmap) (bitmap = ((bitmap) & (~(mask_bitmap))))
+/*---------------------------------------------------------------------------*/
+#define HAL_TAU_PKT_GET_TX_INTR_TYPE(channel) (HAL_INTR_TX_CH0 + channel)
+#define HAL_TAU_PKT_GET_RX_INTR_TYPE(channel) (HAL_INTR_RX_CH0 + channel)
+
+/* DATA TYPE DECLARATIONS
+ */
+typedef enum
+{
+    HAL_TAU_PKT_TX_WAIT_ASYNC = 0,
+    HAL_TAU_PKT_TX_WAIT_SYNC_INTR = 1,
+    HAL_TAU_PKT_TX_WAIT_SYNC_POLL = 2
+
+} HAL_TAU_PKT_TX_WAIT_T;
+
+typedef enum
+{
+    HAL_TAU_PKT_RX_SCHED_RR = 0,
+    HAL_TAU_PKT_RX_SCHED_WRR = 1
+
+} HAL_TAU_PKT_RX_SCHED_T;
+
+/* GPD and Packet Structure Definition */
+#if defined(NPS_EN_BIG_ENDIAN)
+
+typedef struct
+{
+    /* NPS DWORD 0 */
+    UI32_T typ : 2;
+    UI32_T tc : 4;
+    UI32_T color : 2;
+    UI32_T srv : 3;
+    UI32_T trig : 1;
+    UI32_T igr_phy_port :12;
+    UI32_T hsh_val_w0 : 8;
+    /* NPS DWORD 1 */
+    UI32_T hsh_val_w1 : 2;
+    UI32_T dst_idx :15;
+    UI32_T src_idx :15;
+    /* NPS DWORD 2 */
+    UI32_T intf_fdid :14;
+    UI32_T nvo3_mgid_is_transit : 1;
+    UI32_T skip_epp : 1;
+    UI32_T steer_applied : 1;
+    UI32_T nvo3_ip_tnl_decap_prop_ttl : 1;
+    UI32_T nvo3_mpls_uhp_prop_ttl : 1;
+    UI32_T ecn : 2;
+    UI32_T store_and_forward : 1;
+    UI32_T lag_epoch : 1;
+    UI32_T src_supp_tag : 5;
+    UI32_T one_arm_rte_srv_fdid : 1;
+    UI32_T fab_one_arm_rte : 1;
+    UI32_T skip_ipp : 1;
+    UI32_T igr_fab_port_grp : 1;
+    /* NPS DWORD 3 */
+    UI32_T : 2;
+    UI32_T nvo3_mgid :15;
+    UI32_T nvo3_intf :14;
+    UI32_T nvo3_src_supp_tag_w0 : 1;
+    /* NPS DWORD 4 */
+    UI32_T nvo3_src_supp_tag_w1 : 4;
+    UI32_T mir_bmap : 8;
+    UI32_T cp_to_cpu_code : 4;
+    UI32_T cp_to_cpu_bmap :16;
+} HAL_TAU_PKT_ITMH_ETH_T;
+
+typedef struct
+{
+    /* NPS DWORD 0 */
+    UI32_T typ : 2;
+    UI32_T tc : 4;
+    UI32_T color : 2;
+    UI32_T srv : 3;
+    UI32_T trig : 1;
+    UI32_T igr_phy_port :12;
+    UI32_T hsh_val_w0 : 8;
+    /* NPS DWORD 1 */
+    UI32_T hsh_val_w1 : 2;
+    UI32_T dst_idx :15;
+    UI32_T src_idx :15;
+    /* NPS DWORD 2 */
+    UI32_T intf_fdid :14;
+    UI32_T nvo3_mgid_is_transit : 1;
+    UI32_T skip_epp : 1;
+    UI32_T steer_applied : 1;
+    UI32_T nvo3_ip_tnl_decap_prop_ttl : 1;
+    UI32_T nvo3_mpls_uhp_prop_ttl : 1;
+    UI32_T ecn : 2;
+    UI32_T store_and_forward : 1;
+    UI32_T lag_epoch : 1;
+    UI32_T src_supp_tag : 5;
+    UI32_T one_arm_rte_srv_fdid : 1;
+    UI32_T fab_one_arm_rte : 1;
+    UI32_T : 2;
+    /* NPS DWORD 3 */
+    UI32_T :32;
+    /* NPS DWORD 4 */
+    UI32_T :23;
+    UI32_T excpt_code : 8;
+    UI32_T exp_dscp_mrkd : 1;
+} HAL_TAU_PKT_ETMH_FAB_T;
+
+typedef struct
+{
+    /* NPS DWORD 0 */
+    UI32_T typ : 2;
+    UI32_T tc : 4;
+    UI32_T color : 2;
+    UI32_T srv : 3;
+    UI32_T trig : 1;
+    UI32_T igr_phy_port :12;
+    UI32_T hsh_val_w0 : 8;
+    /* NPS DWORD 1 */
+    UI32_T hsh_val_w1 : 2;
+    UI32_T dst_idx :15;
+    UI32_T src_idx :15;
+    /* NPS DWORD 2 */
+    UI32_T intf_fdid :14;
+    UI32_T nvo3_mgid_is_transit : 1;
+    UI32_T skip_epp : 1;
+    UI32_T steer_applied : 1;
+    UI32_T nvo3_ip_tnl_decap_prop_ttl : 1;
+    UI32_T nvo3_mpls_uhp_prop_ttl : 1;
+    UI32_T ecn : 2;
+    UI32_T igr_fab_port_grp : 1;
+    UI32_T redir : 1;
+    UI32_T excpt_code_mir_bmap : 8;
+    UI32_T cp_to_cpu_bmap_w0 : 1;
+    /* NPS DWORD 3 */
+    UI32_T cp_to_cpu_bmap_w1 : 7;
+    UI32_T egr_phy_port :12;
+    UI32_T src_supp_pnd : 1;
+    UI32_T mc_vid_ctl : 3;
+    UI32_T mc_vid_1st_w0 : 9;
+    /* NPS DWORD 4 */
+    UI32_T mc_vid_1st_w1 : 3;
+    UI32_T mc_vid_2nd :12;
+    UI32_T mc_decr_ttl : 1;
+    UI32_T
mc_is_routed : 1; + UI32_T mc_mel_vld : 1; + UI32_T mc_cp_idx :13; + UI32_T exp_dscp_mrkd : 1; +} HAL_TAU_PKT_ETMH_ETH_T; + +typedef struct +{ + /* NPS DWORD 0 */ + UI32_T decap_act : 3; + UI32_T igr_l2_vid_num : 2; + UI32_T nvo3_encap_idx :14; + UI32_T mpls_pw_cw_vld : 1; + UI32_T hit_idx_w0 :12; + /* NPS DWORD 1 */ + UI32_T hit_idx_w1 : 7; + UI32_T nvo3_adj_idx : 8; + UI32_T seg_vmid_w0 :17; + /* NPS DWORD 2 */ + UI32_T seg_vmid_w1 : 7; + UI32_T : 1; + UI32_T l2_sa_lrn_en_hw_cvs : 1; + UI32_T l2_sa_lrn_en_hw : 1; + UI32_T vid_ctl : 3; + UI32_T vid_1st :12; + UI32_T vid_2nd_w0 : 7; + /* NPS DWORD 3 */ + UI32_T vid_2nd_w1 : 5; + UI32_T flw_lbl :10; + UI32_T rewr_idx_ctl : 2; + UI32_T rewr_idx_0 :13; + UI32_T rewr_idx_1_w0 : 2; + /* NPS DWORD 4 */ + UI32_T rewr_idx_1_w1 :11; + UI32_T mrk_pcp_dei_en : 1; + UI32_T mrk_pcp_val : 3; + UI32_T mrk_dei_val : 1; + UI32_T ts :16; +} HAL_TAU_PKT_PPH_L2_T; + +typedef struct +{ + /* NPS DWORD 0 */ + UI32_T decap_act : 3; + UI32_T igr_l2_vid_num : 2; + UI32_T nvo3_encap_idx :14; + UI32_T mpls_pw_cw_vld : 1; + UI32_T hit_idx_w0 :12; + /* NPS DWORD 1 */ + UI32_T hit_idx_w1 : 7; + UI32_T nvo3_adj_idx : 8; + UI32_T seg_vmid_w0 :17; + /* NPS DWORD 2 */ + UI32_T seg_vmid_w1 : 7; + UI32_T : 1; + UI32_T rpf_pnd : 1; + UI32_T adj_idx :18; + UI32_T is_mc : 1; + UI32_T decr_ttl : 1; + UI32_T decap_prop_ttl : 1; + UI32_T mrk_dscp_en : 1; + UI32_T mrk_dscp_val_w0 : 1; + /* NPS DWORD 3 */ + UI32_T mrk_dscp_val_w1 : 5; + UI32_T flw_lbl :10; + UI32_T rewr_idx_ctl : 2; + UI32_T rewr_idx_0 :13; + UI32_T rewr_idx_1_w0 : 2; + /* NPS DWORD 4 */ + UI32_T rewr_idx_1_w1 :11; + UI32_T mrk_pcp_dei_en : 1; + UI32_T mrk_pcp_val : 3; + UI32_T mrk_dei_val : 1; + UI32_T ts :16; +} HAL_TAU_PKT_PPH_L3UC_T; + +typedef struct +{ + /* NPS DWORD 0 */ + UI32_T decap_act : 3; + UI32_T igr_l2_vid_num : 2; + UI32_T nvo3_encap_idx :14; + UI32_T mpls_pw_cw_vld : 1; + UI32_T hit_idx_w0 :12; + /* NPS DWORD 1 */ + UI32_T hit_idx_w1 : 7; + UI32_T nvo3_adj_idx : 8; + UI32_T vid_1st :12; + UI32_T vid_2nd_w0 : 5; + /* NPS DWORD 2 */ + UI32_T vid_2nd_w1 : 7; + UI32_T :15; + UI32_T l2_sa_lrn_en_hw_cvs : 1; + UI32_T l2_sa_lrn_en_hw : 1; + UI32_T vid_ctl : 3; + UI32_T is_mc : 1; + UI32_T : 1; + UI32_T decap_prop_ttl : 1; + UI32_T mrk_dscp_en : 1; + UI32_T mrk_dscp_val_w0 : 1; + /* NPS DWORD 3 */ + UI32_T mrk_dscp_val_w1 : 5; + UI32_T flw_lbl :10; + UI32_T rewr_idx_ctl : 2; + UI32_T rewr_idx_0 :13; + UI32_T rewr_idx_1_w0 : 2; + /* NPS DWORD 4 */ + UI32_T rewr_idx_1_w1 :11; + UI32_T mrk_pcp_dei_en : 1; + UI32_T mrk_pcp_val : 3; + UI32_T mrk_dei_val : 1; + UI32_T ts :16; +} HAL_TAU_PKT_PPH_L3MC_T; + +typedef struct +{ + /* NPS DWORD 0 */ + UI32_T decap_act : 3; + UI32_T igr_l2_vid_num : 2; + UI32_T nvo3_encap_idx :14; + UI32_T : 1; + UI32_T hit_idx_w0 :12; + /* NPS DWORD 1 */ + UI32_T hit_idx_w1 : 7; + UI32_T nvo3_adj_idx : 8; + UI32_T seg_vmid_w0 :17; + /* NPS DWORD 2 */ + UI32_T seg_vmid_w1 : 7; + UI32_T : 2; + UI32_T adj_idx :18; + UI32_T : 1; + UI32_T decr_ttl : 1; + UI32_T decap_prop_ttl : 1; + UI32_T mrk_exp_en : 1; + UI32_T : 1; + /* NPS DWORD 3 */ + UI32_T : 2; + UI32_T mrk_exp_val : 3; + UI32_T php_pop_keep_inner_qos : 1; + UI32_T :26; + /* NPS DWORD 4 */ + UI32_T :11; + UI32_T mrk_pcp_dei_en : 1; + UI32_T mrk_pcp_val : 3; + UI32_T mrk_dei_val : 1; + UI32_T ts :16; +} HAL_TAU_PKT_PPH_L25_T; + +#elif defined(NPS_EN_LITTLE_ENDIAN) + +typedef struct +{ + /* NPS DWORD 0 */ + UI32_T hsh_val_w0 : 8; + UI32_T igr_phy_port :12; + UI32_T trig : 1; + UI32_T srv : 3; + UI32_T color : 2; + UI32_T tc : 4; + 
UI32_T typ : 2; + /* NPS DWORD 1 */ + UI32_T src_idx :15; + UI32_T dst_idx :15; + UI32_T hsh_val_w1 : 2; + /* NPS DWORD 2 */ + UI32_T igr_fab_port_grp : 1; + UI32_T skip_ipp : 1; + UI32_T fab_one_arm_rte : 1; + UI32_T one_arm_rte_srv_fdid : 1; + UI32_T src_supp_tag : 5; + UI32_T lag_epoch : 1; + UI32_T store_and_forward : 1; + UI32_T ecn : 2; + UI32_T nvo3_mpls_uhp_prop_ttl : 1; + UI32_T nvo3_ip_tnl_decap_prop_ttl : 1; + UI32_T steer_applied : 1; + UI32_T skip_epp : 1; + UI32_T nvo3_mgid_is_transit : 1; + UI32_T intf_fdid :14; + /* NPS DWORD 3 */ + UI32_T nvo3_src_supp_tag_w0 : 1; + UI32_T nvo3_intf :14; + UI32_T nvo3_mgid :15; + UI32_T : 2; + /* NPS DWORD 4 */ + UI32_T cp_to_cpu_bmap :16; + UI32_T cp_to_cpu_code : 4; + UI32_T mir_bmap : 8; + UI32_T nvo3_src_supp_tag_w1 : 4; +} HAL_TAU_PKT_ITMH_ETH_T; + +typedef struct +{ + /* NPS DWORD 0 */ + UI32_T hsh_val_w0 : 8; + UI32_T igr_phy_port :12; + UI32_T trig : 1; + UI32_T srv : 3; + UI32_T color : 2; + UI32_T tc : 4; + UI32_T typ : 2; + /* NPS DWORD 1 */ + UI32_T src_idx :15; + UI32_T dst_idx :15; + UI32_T hsh_val_w1 : 2; + /* NPS DWORD 2 */ + UI32_T : 2; + UI32_T fab_one_arm_rte : 1; + UI32_T one_arm_rte_srv_fdid : 1; + UI32_T src_supp_tag : 5; + UI32_T lag_epoch : 1; + UI32_T store_and_forward : 1; + UI32_T ecn : 2; + UI32_T nvo3_mpls_uhp_prop_ttl : 1; + UI32_T nvo3_ip_tnl_decap_prop_ttl : 1; + UI32_T steer_applied : 1; + UI32_T skip_epp : 1; + UI32_T nvo3_mgid_is_transit : 1; + UI32_T intf_fdid :14; + /* NPS DWORD 3 */ + UI32_T :32; + /* NPS DWORD 4 */ + UI32_T exp_dscp_mrkd : 1; + UI32_T excpt_code : 8; + UI32_T :23; +} HAL_TAU_PKT_ETMH_FAB_T; + +typedef struct +{ + /* NPS DWORD 0 */ + UI32_T hsh_val_w0 : 8; + UI32_T igr_phy_port :12; + UI32_T trig : 1; + UI32_T srv : 3; + UI32_T color : 2; + UI32_T tc : 4; + UI32_T typ : 2; + /* NPS DWORD 1 */ + UI32_T src_idx :15; + UI32_T dst_idx :15; + UI32_T hsh_val_w1 : 2; + /* NPS DWORD 2 */ + UI32_T cp_to_cpu_bmap_w0 : 1; + UI32_T excpt_code_mir_bmap : 8; + UI32_T redir : 1; + UI32_T igr_fab_port_grp : 1; + UI32_T ecn : 2; + UI32_T nvo3_mpls_uhp_prop_ttl : 1; + UI32_T nvo3_ip_tnl_decap_prop_ttl : 1; + UI32_T steer_applied : 1; + UI32_T skip_epp : 1; + UI32_T nvo3_mgid_is_transit : 1; + UI32_T intf_fdid :14; + /* NPS DWORD 3 */ + UI32_T mc_vid_1st_w0 : 9; + UI32_T mc_vid_ctl : 3; + UI32_T src_supp_pnd : 1; + UI32_T egr_phy_port :12; + UI32_T cp_to_cpu_bmap_w1 : 7; + /* NPS DWORD 4 */ + UI32_T exp_dscp_mrkd : 1; + UI32_T mc_cp_idx :13; + UI32_T mc_mel_vld : 1; + UI32_T mc_is_routed : 1; + UI32_T mc_decr_ttl : 1; + UI32_T mc_vid_2nd :12; + UI32_T mc_vid_1st_w1 : 3; +} HAL_TAU_PKT_ETMH_ETH_T; + +typedef struct +{ + /* NPS DWORD 0 */ + UI32_T hit_idx_w0 :12; + UI32_T mpls_pw_cw_vld : 1; + UI32_T nvo3_encap_idx :14; + UI32_T igr_l2_vid_num : 2; + UI32_T decap_act : 3; + /* NPS DWORD 1 */ + UI32_T seg_vmid_w0 :17; + UI32_T nvo3_adj_idx : 8; + UI32_T hit_idx_w1 : 7; + /* NPS DWORD 2 */ + UI32_T vid_2nd_w0 : 7; + UI32_T vid_1st :12; + UI32_T vid_ctl : 3; + UI32_T l2_sa_lrn_en_hw : 1; + UI32_T l2_sa_lrn_en_hw_cvs : 1; + UI32_T : 1; + UI32_T seg_vmid_w1 : 7; + /* NPS DWORD 3 */ + UI32_T rewr_idx_1_w0 : 2; + UI32_T rewr_idx_0 :13; + UI32_T rewr_idx_ctl : 2; + UI32_T flw_lbl :10; + UI32_T vid_2nd_w1 : 5; + /* NPS DWORD 4 */ + UI32_T ts :16; + UI32_T mrk_dei_val : 1; + UI32_T mrk_pcp_val : 3; + UI32_T mrk_pcp_dei_en : 1; + UI32_T rewr_idx_1_w1 :11; +} HAL_TAU_PKT_PPH_L2_T; + +typedef struct +{ + /* NPS DWORD 0 */ + UI32_T hit_idx_w0 :12; + UI32_T mpls_pw_cw_vld : 1; + UI32_T nvo3_encap_idx :14; + UI32_T 
igr_l2_vid_num : 2; + UI32_T decap_act : 3; + /* NPS DWORD 1 */ + UI32_T seg_vmid_w0 :17; + UI32_T nvo3_adj_idx : 8; + UI32_T hit_idx_w1 : 7; + /* NPS DWORD 2 */ + UI32_T mrk_dscp_val_w0 : 1; + UI32_T mrk_dscp_en : 1; + UI32_T decap_prop_ttl : 1; + UI32_T decr_ttl : 1; + UI32_T is_mc : 1; + UI32_T adj_idx :18; + UI32_T rpf_pnd : 1; + UI32_T : 1; + UI32_T seg_vmid_w1 : 7; + /* NPS DWORD 3 */ + UI32_T rewr_idx_1_w0 : 2; + UI32_T rewr_idx_0 :13; + UI32_T rewr_idx_ctl : 2; + UI32_T flw_lbl :10; + UI32_T mrk_dscp_val_w1 : 5; + /* NPS DWORD 4 */ + UI32_T ts :16; + UI32_T mrk_dei_val : 1; + UI32_T mrk_pcp_val : 3; + UI32_T mrk_pcp_dei_en : 1; + UI32_T rewr_idx_1_w1 :11; +} HAL_TAU_PKT_PPH_L3UC_T; + +typedef struct +{ + /* NPS DWORD 0 */ + UI32_T hit_idx_w0 :12; + UI32_T mpls_pw_cw_vld : 1; + UI32_T nvo3_encap_idx :14; + UI32_T igr_l2_vid_num : 2; + UI32_T decap_act : 3; + /* NPS DWORD 1 */ + UI32_T vid_2nd_w0 : 5; + UI32_T vid_1st :12; + UI32_T nvo3_adj_idx : 8; + UI32_T hit_idx_w1 : 7; + /* NPS DWORD 2 */ + UI32_T mrk_dscp_val_w0 : 1; + UI32_T mrk_dscp_en : 1; + UI32_T decap_prop_ttl : 1; + UI32_T : 1; + UI32_T is_mc : 1; + UI32_T vid_ctl : 3; + UI32_T l2_sa_lrn_en_hw : 1; + UI32_T l2_sa_lrn_en_hw_cvs : 1; + UI32_T :15; + UI32_T vid_2nd_w1 : 7; + /* NPS DWORD 3 */ + UI32_T rewr_idx_1_w0 : 2; + UI32_T rewr_idx_0 :13; + UI32_T rewr_idx_ctl : 2; + UI32_T flw_lbl :10; + UI32_T mrk_dscp_val_w1 : 5; + /* NPS DWORD 4 */ + UI32_T ts :16; + UI32_T mrk_dei_val : 1; + UI32_T mrk_pcp_val : 3; + UI32_T mrk_pcp_dei_en : 1; + UI32_T rewr_idx_1_w1 :11; +} HAL_TAU_PKT_PPH_L3MC_T; + +typedef struct +{ + /* NPS DWORD 0 */ + UI32_T hit_idx_w0 :12; + UI32_T : 1; + UI32_T nvo3_encap_idx :14; + UI32_T igr_l2_vid_num : 2; + UI32_T decap_act : 3; + /* NPS DWORD 1 */ + UI32_T seg_vmid_w0 :17; + UI32_T nvo3_adj_idx : 8; + UI32_T hit_idx_w1 : 7; + /* NPS DWORD 2 */ + UI32_T : 1; + UI32_T mrk_exp_en : 1; + UI32_T decap_prop_ttl : 1; + UI32_T decr_ttl : 1; + UI32_T : 1; + UI32_T adj_idx :18; + UI32_T : 2; + UI32_T seg_vmid_w1 : 7; + /* NPS DWORD 3 */ + UI32_T :26; + UI32_T php_pop_keep_inner_qos : 1; + UI32_T mrk_exp_val : 3; + UI32_T : 2; + /* NPS DWORD 4 */ + UI32_T ts :16; + UI32_T mrk_dei_val : 1; + UI32_T mrk_pcp_val : 3; + UI32_T mrk_pcp_dei_en : 1; + UI32_T :11; +} HAL_TAU_PKT_PPH_L25_T; + +#else +#error "Host GPD endian is not defined!!\n" +#endif + +#if defined(NPS_EN_BIG_ENDIAN) + +/* RX GPD STRUCTURE */ +typedef struct +{ + UI32_T data_buf_addr_lo; + UI32_T data_buf_addr_hi; + UI32_T chksum : 16; + UI32_T ioc : 1; + UI32_T : 1; + UI32_T avbl_buf_len : 14; + UI32_T : 32; + + union + { + HAL_TAU_PKT_ITMH_ETH_T itmh_eth; + HAL_TAU_PKT_ETMH_FAB_T etmh_fab; + HAL_TAU_PKT_ETMH_ETH_T etmh_eth; + }; + union + { + HAL_TAU_PKT_PPH_L2_T pph_l2; + HAL_TAU_PKT_PPH_L3UC_T pph_l3uc; + HAL_TAU_PKT_PPH_L3MC_T pph_l3mc; + HAL_TAU_PKT_PPH_L25_T pph_l25; + }; + + UI32_T : 32; + UI32_T hwo : 1; + UI32_T ch : 1; + UI32_T trn : 1; + UI32_T ecce : 1; + UI32_T errf : 1; + UI32_T : 5; + UI32_T queue : 6; + UI32_T : 2; + UI32_T cnsm_buf_len : 14; + +} HAL_TAU_PKT_RX_GPD_T; + +/* TX GPD STRUCTURE */ +typedef struct +{ + UI32_T data_buf_addr_lo; + UI32_T data_buf_addr_hi; + UI32_T chksum : 16; + UI32_T ioc : 1; + UI32_T : 1; + UI32_T data_buf_size : 14; + UI32_T : 32; + + union + { + HAL_TAU_PKT_ITMH_ETH_T itmh_eth; + HAL_TAU_PKT_ETMH_FAB_T etmh_fab; + HAL_TAU_PKT_ETMH_ETH_T etmh_eth; + }; + union + { + HAL_TAU_PKT_PPH_L2_T pph_l2; + HAL_TAU_PKT_PPH_L3UC_T pph_l3uc; + HAL_TAU_PKT_PPH_L3MC_T pph_l3mc; + HAL_TAU_PKT_PPH_L25_T pph_l25; + }; + + 
UI32_T : 16; + UI32_T ptp_hdr : 16; + UI32_T hwo : 1; + UI32_T ch : 1; + UI32_T : 1; + UI32_T ecce : 1; + UI32_T : 4; + UI32_T cos : 3; + UI32_T phc : 1; /* PTP Header Control */ + UI32_T ipc : 2; /* Ingress Plane Control */ + UI32_T crcc : 1; + UI32_T prg : 1; /* Purge */ + UI32_T : 2; + UI32_T pkt_len : 14; /* Total packet length */ + +} HAL_TAU_PKT_TX_GPD_T; + +#elif defined(NPS_EN_LITTLE_ENDIAN) + +/* RX GPD STRUCTURE */ +typedef struct +{ + UI32_T data_buf_addr_lo; + UI32_T data_buf_addr_hi; + UI32_T avbl_buf_len : 14; + UI32_T : 1; + UI32_T ioc : 1; + UI32_T chksum : 16; + UI32_T : 32; + + union + { + HAL_TAU_PKT_ITMH_ETH_T itmh_eth; + HAL_TAU_PKT_ETMH_FAB_T etmh_fab; + HAL_TAU_PKT_ETMH_ETH_T etmh_eth; + }; + union + { + HAL_TAU_PKT_PPH_L2_T pph_l2; + HAL_TAU_PKT_PPH_L3UC_T pph_l3uc; + HAL_TAU_PKT_PPH_L3MC_T pph_l3mc; + HAL_TAU_PKT_PPH_L25_T pph_l25; + }; + + UI32_T : 32; + UI32_T cnsm_buf_len : 14; + UI32_T : 2; + UI32_T queue : 6; + UI32_T : 5; + UI32_T errf : 1; + UI32_T ecce : 1; + UI32_T trn : 1; + UI32_T ch : 1; + UI32_T hwo : 1; + +} HAL_TAU_PKT_RX_GPD_T; + +/* TX GPD STRUCTURE */ +typedef struct +{ + UI32_T data_buf_addr_lo; + UI32_T data_buf_addr_hi; + UI32_T data_buf_size : 14; + UI32_T : 1; + UI32_T ioc : 1; + UI32_T chksum : 16; + UI32_T : 32; + + union + { + HAL_TAU_PKT_ITMH_ETH_T itmh_eth; + HAL_TAU_PKT_ETMH_FAB_T etmh_fab; + HAL_TAU_PKT_ETMH_ETH_T etmh_eth; + }; + union + { + HAL_TAU_PKT_PPH_L2_T pph_l2; + HAL_TAU_PKT_PPH_L3UC_T pph_l3uc; + HAL_TAU_PKT_PPH_L3MC_T pph_l3mc; + HAL_TAU_PKT_PPH_L25_T pph_l25; + }; + + UI32_T ptp_hdr : 16; + UI32_T : 16; + UI32_T pkt_len : 14; /* Total packet length */ + UI32_T : 2; + UI32_T prg : 1; /* Purge */ + UI32_T crcc : 1; + UI32_T ipc : 2; /* Ingress Plane Control */ + UI32_T phc : 1; /* PTP Header Control */ + UI32_T cos : 3; + UI32_T : 4; + UI32_T ecce : 1; + UI32_T : 1; + UI32_T ch : 1; + UI32_T hwo : 1; +} HAL_TAU_PKT_TX_GPD_T; + +#else +#error "Host GPD endian is not defined\n" +#endif + +/* ----------------------------------------------------------------------------------- PP Type */ +typedef enum +{ + HAL_TAU_PKT_TMH_TYPE_ITMH_ETH = 0, + HAL_TAU_PKT_TMH_TYPE_ITMH_FAB, + HAL_TAU_PKT_TMH_TYPE_ETMH_FAB, + HAL_TAU_PKT_TMH_TYPE_ETMH_ETH, + HAL_TAU_PKT_TMH_TYPE_LAST + +} HAL_TAU_PKT_TMH_TYPE_T; + +typedef enum +{ + HAL_TAU_PKT_TMH_SRV_L2 = 0, + HAL_TAU_PKT_TMH_SRV_L25_MPLS, + HAL_TAU_PKT_TMH_SRV_L3, + HAL_TAU_PKT_TMH_SRV_EGR, /* L3 downgrade L2 */ + HAL_TAU_PKT_TMH_SRV_L25_NSH, + HAL_TAU_PKT_TMH_SRV_L25_TRILL, + HAL_TAU_PKT_TMH_SRV_LAST + +} HAL_TAU_PKT_TMH_SRV_T; + +typedef enum +{ + HAL_TAU_PKT_TMH_DECAP_NONE = 0, + HAL_TAU_PKT_TMH_DECAP_1_MPLS_LABEL, + HAL_TAU_PKT_TMH_DECAP_2_MPLS_LABEL, + HAL_TAU_PKT_TMH_DECAP_3_MPLS_LABEL, + HAL_TAU_PKT_TMH_DECAP_4_MPLS_LABEL, + HAL_TAU_PKT_TMH_DECAP_IP_TRILL_NSH, + HAL_TAU_PKT_TMH_DECAP_LAST + +} HAL_TAU_PKT_TMH_DECAP_T; + +typedef struct +{ + union + { + HAL_TAU_PKT_ITMH_ETH_T itmh_eth; + HAL_TAU_PKT_ETMH_FAB_T etmh_fab; + HAL_TAU_PKT_ETMH_ETH_T etmh_eth; + }; +} HAL_TAU_PKT_TMH_T; + +typedef struct +{ + union + { + HAL_TAU_PKT_PPH_L2_T pph_l2; + HAL_TAU_PKT_PPH_L3UC_T pph_l3uc; + HAL_TAU_PKT_PPH_L3MC_T pph_l3mc; + HAL_TAU_PKT_PPH_L25_T pph_l25; + }; +} HAL_TAU_PKT_PPH_T; + +/* ----------------------------------------------------------------------------------- Reg Type */ +typedef enum +{ + HAL_TAU_PKT_L2_ISR_RCH0 = (0x1 << 0), + HAL_TAU_PKT_L2_ISR_RCH1 = (0x1 << 1), + HAL_TAU_PKT_L2_ISR_RCH2 = (0x1 << 2), + HAL_TAU_PKT_L2_ISR_RCH3 = (0x1 << 3), + HAL_TAU_PKT_L2_ISR_TCH0 = (0x1 << 4), + 
HAL_TAU_PKT_L2_ISR_TCH1 = (0x1 << 5),
+    HAL_TAU_PKT_L2_ISR_TCH2 = (0x1 << 6),
+    HAL_TAU_PKT_L2_ISR_TCH3 = (0x1 << 7),
+    HAL_TAU_PKT_L2_ISR_RX_QID_MAP_ERR = (0x1 << 8),
+    HAL_TAU_PKT_L2_ISR_RX_FRAME_ERR = (0x1 << 9)
+
+} HAL_TAU_PKT_L2_ISR_T;
+
+typedef enum
+{
+    HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_HWO_ERROR = (0x1 << 0),        /* Tx GPD.hwo = 0 */
+    HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_CHKSM_ERROR = (0x1 << 1),      /* Tx GPD.chksm is incorrect */
+    HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_NO_OVFL_ERROR = (0x1 << 2),    /* S/W pushed too many GPDs */
+    HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_DMA_READ_ERROR = (0x1 << 3),   /* AXI read error during GPD read */
+    HAL_TAU_PKT_TX_CHANNEL_L2_ISR_BUF_SIZE_ERROR = (0x1 << 4),       /* Tx GPD.data_buf_size = 0 */
+    HAL_TAU_PKT_TX_CHANNEL_L2_ISR_RUNT_ERROR = (0x1 << 5),           /* Tx GPD.pkt_len < 64 */
+    HAL_TAU_PKT_TX_CHANNEL_L2_ISR_OVSZ_ERROR = (0x1 << 6),           /* Tx GPD.pkt_len = 9217 */
+    HAL_TAU_PKT_TX_CHANNEL_L2_ISR_LEN_MISMATCH_ERROR = (0x1 << 7),   /* Tx GPD.pkt_len != sum of data_buf_size */
+    HAL_TAU_PKT_TX_CHANNEL_L2_ISR_PKTPL_DMA_READ_ERROR = (0x1 << 8), /* AXI read error during payload read */
+    HAL_TAU_PKT_TX_CHANNEL_L2_ISR_COS_ERROR = (0x1 << 9),            /* Tx GPD.cos does not match cos_to_tch_map */
+    HAL_TAU_PKT_TX_CHANNEL_L2_ISR_GPD_GT255_ERROR = (0x1 << 10),     /* Multi-GPD packet's GPD# > 255 */
+    HAL_TAU_PKT_TX_CHANNEL_L2_ISR_PFC = (0x1 << 11),                 /* */
+    HAL_TAU_PKT_TX_CHANNEL_L2_ISR_CREDIT_UDFL_ERROR = (0x1 << 12),   /* Credit underflow (counts down to 0) */
+    HAL_TAU_PKT_TX_CHANNEL_L2_ISR_DMA_WRITE_ERROR = (0x1 << 13),     /* AXI write error (GPD write-back) */
+    HAL_TAU_PKT_TX_CHANNEL_L2_ISR_STOP_CMD_CPLT = (0x1 << 14)
+
+} HAL_TAU_PKT_TX_CHANNEL_L2_ISR_T;
+
+typedef enum
+{
+    HAL_TAU_PKT_RX_CHANNEL_L2_ISR_AVAIL_GPD_LOW = (0x1 << 0),   /* Rx GPD.avbl_gpd_num < threshold */
+    HAL_TAU_PKT_RX_CHANNEL_L2_ISR_AVAIL_GPD_EMPTY = (0x1 << 1), /* Rx GPD.avbl_gpd_num = 0 */
+    HAL_TAU_PKT_RX_CHANNEL_L2_ISR_AVAIL_GPD_ERROR = (0x1 << 2), /* Rx GPD.hwo = 0 */
+    HAL_TAU_PKT_RX_CHANNEL_L2_ISR_GPD_CHKSM_ERROR = (0x1 << 3), /* Rx GPD.chksm is incorrect */
+    HAL_TAU_PKT_RX_CHANNEL_L2_ISR_DMA_READ_ERROR = (0x1 << 4),  /* DMAR error occurs in PCIE */
+    HAL_TAU_PKT_RX_CHANNEL_L2_ISR_DMA_WRITE_ERROR = (0x1 << 5), /* DMAW error occurs in PCIE */
+    HAL_TAU_PKT_RX_CHANNEL_L2_ISR_STOP_CMD_CPLT = (0x1 << 6),   /* Stop completion acknowledge */
+    HAL_TAU_PKT_RX_CHANNEL_L2_ISR_GPD_GT255_ERROR = (0x1 << 7), /* Multi-GPD packet's GPD# > 255 */
+    HAL_TAU_PKT_RX_CHANNEL_L2_ISR_TOD_UNINIT = (0x1 << 8),      /* */
+    HAL_TAU_PKT_RX_CHANNEL_L2_ISR_PKT_ERROR_DROP = (0x1 << 9),  /* */
+    HAL_TAU_PKT_RX_CHANNEL_L2_ISR_UDSZ_DROP = (0x1 << 10),      /* */
+    HAL_TAU_PKT_RX_CHANNEL_L2_ISR_OVSZ_DROP = (0x1 << 11),      /* */
+    HAL_TAU_PKT_RX_CHANNEL_L2_ISR_CMDQ_OVF_DROP = (0x1 << 12),  /* */
+    HAL_TAU_PKT_RX_CHANNEL_L2_ISR_FIFO_OVF_DROP = (0x1 << 13)
+
+} HAL_TAU_PKT_RX_CHANNEL_L2_ISR_T;
+
+typedef enum
+{
+    HAL_TAU_PKT_TX_CHANNEL_CFG_IOC = (0x1 << 0),
+    HAL_TAU_PKT_TX_CHANNEL_CFG_CHKSUM = (0x1 << 1),
+    HAL_TAU_PKT_TX_CHANNEL_CFG_PFC = (0x1 << 2),
+    HAL_TAU_PKT_TX_CHANNEL_CFG_PKT_LEN_CHK = (0x1 << 3),
+    HAL_TAU_PKT_TX_CHANNEL_CFG_EARLY_DONE_IRQ = (0x1 << 4),
+    HAL_TAU_PKT_TX_CHANNEL_CFG_CHK_COS = (0x1 << 5),
+    HAL_TAU_PKT_TX_CHANNEL_CFG_ADV_GPD_WRBK = (0x1 << 6),
+    HAL_TAU_PKT_TX_CHANNEL_CFG_GPD_WRBK_FULL_PKT_LEN = (0x1 << 7),
+    HAL_TAU_PKT_TX_CHANNEL_CFG_LAST = (0x1 << 8)
+
+} HAL_TAU_PKT_TX_CHANNEL_CFG_T;
+
+typedef enum
+{
+    HAL_TAU_PKT_RX_CHANNEL_CFG_IOC = (0x1 << 0),
+    HAL_TAU_PKT_RX_CHANNEL_CFG_CHKSUM = (0x1 << 1),
+    HAL_TAU_PKT_RX_CHANNEL_CFG_LAST = (0x1 << 2)
+
+} HAL_TAU_PKT_RX_CHANNEL_CFG_T;
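The *_CHANNEL_CFG_T values are single-bit flags, so a channel configuration is just a bitmap assembled with the helper macros defined earlier. A sketch, with tch_cfg being a hypothetical configuration word handed to the channel setup path:

    UI32_T tch_cfg = 0;

    HAL_TAU_PKT_SET_BITMAP(tch_cfg,
        HAL_TAU_PKT_TX_CHANNEL_CFG_IOC | HAL_TAU_PKT_TX_CHANNEL_CFG_CHKSUM);

    if (HAL_TAU_PKT_GET_BIT(tch_cfg, HAL_TAU_PKT_TX_CHANNEL_CFG_PFC))
    {
        /* not taken in this example; PFC was not set */
    }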
+
+/* ----------------------------------------------------------------------------------- Tx */
+typedef enum
+{
+    HAL_TAU_PKT_TX_CHANNEL_0 = 0,
+    HAL_TAU_PKT_TX_CHANNEL_1,
+    HAL_TAU_PKT_TX_CHANNEL_2,
+    HAL_TAU_PKT_TX_CHANNEL_3,
+    HAL_TAU_PKT_TX_CHANNEL_LAST
+
+} HAL_TAU_PKT_TX_CHANNEL_T;
+
+typedef void
+(*HAL_TAU_PKT_TX_FUNC_T)(
+    const UI32_T unit,
+    const void *ptr_sw_gpd, /* SW-GPD to be processed */
+    void *ptr_cookie);      /* Private data of SDK */
+
+typedef struct HAL_TAU_PKT_TX_SW_GPD_S
+{
+    HAL_TAU_PKT_TX_FUNC_T callback; /* (unit, ptr_sw_gpd, ptr_cookie) */
+    void *ptr_cookie;               /* Pointer of NPS_PKT_TX_PKT_T */
+    HAL_TAU_PKT_TX_GPD_T tx_gpd;
+    UI32_T gpd_num;
+    struct HAL_TAU_PKT_TX_SW_GPD_S *ptr_next;
+
+#if defined (NPS_EN_NETIF)
+    UI32_T channel; /* For counter */
+#endif
+
+} HAL_TAU_PKT_TX_SW_GPD_T;
+
+typedef struct
+{
+    UI32_T send_ok;
+    UI32_T gpd_empty;
+    UI32_T poll_timeout;
+
+    /* queue */
+    UI32_T enque_ok;
+    UI32_T enque_retry;
+
+    /* event */
+    UI32_T trig_event;
+
+    /* normal interrupt */
+    UI32_T tx_done;
+
+    /* abnormal interrupt */
+    UI32_T gpd_hwo_err;        /* bit-0 */
+    UI32_T gpd_chksm_err;      /* bit-1 */
+    UI32_T gpd_no_ovfl_err;    /* bit-2 */
+    UI32_T gpd_dma_read_err;   /* bit-3 */
+    UI32_T buf_size_err;       /* bit-4 */
+    UI32_T runt_err;           /* bit-5 */
+    UI32_T ovsz_err;           /* bit-6 */
+    UI32_T len_mismatch_err;   /* bit-7 */
+    UI32_T pktpl_dma_read_err; /* bit-8 */
+    UI32_T cos_err;            /* bit-9 */
+    UI32_T gpd_gt255_err;      /* bit-10 */
+    UI32_T pfc;                /* bit-11 */
+    UI32_T credit_udfl_err;    /* bit-12 */
+    UI32_T dma_write_err;      /* bit-13 */
+    UI32_T sw_issue_stop;      /* bit-14 */
+
+    /* others */
+    UI32_T err_recover;
+    UI32_T ecc_err;
+
+} HAL_TAU_PKT_TX_CHANNEL_CNT_T;
+
+typedef struct
+{
+    HAL_TAU_PKT_TX_CHANNEL_CNT_T channel[HAL_TAU_PKT_TX_CHANNEL_LAST];
+    UI32_T invoke_gpd_callback;
+    UI32_T no_memory;
+
+    /* queue */
+    UI32_T deque_ok;
+    UI32_T deque_fail;
+
+    /* event */
+    UI32_T wait_event;
+
+} HAL_TAU_PKT_TX_CNT_T;
+
+/* ----------------------------------------------------------------------------------- Rx */
+typedef enum
+{
+    HAL_TAU_PKT_RX_CHANNEL_0 = 0,
+    HAL_TAU_PKT_RX_CHANNEL_1,
+    HAL_TAU_PKT_RX_CHANNEL_2,
+    HAL_TAU_PKT_RX_CHANNEL_3,
+    HAL_TAU_PKT_RX_CHANNEL_LAST
+} HAL_TAU_PKT_RX_CHANNEL_T;
+
+typedef enum
+{
+    HAL_TAU_PKT_C_NEXT = 0, /* continue with the next callback */
+    HAL_TAU_PKT_C_STOP = 1,
+    HAL_TAU_PKT_C_OTHERS = 2
+} HAL_TAU_PKT_CALLBACK_NO_T;
+
+typedef enum
+{
+    HAL_TAU_PKT_RX_CALLBACK_ACTION_INSERT = 0,
+    HAL_TAU_PKT_RX_CALLBACK_ACTION_APPEND = 1,
+    HAL_TAU_PKT_RX_CALLBACK_ACTION_DELETE = 2,
+    HAL_TAU_PKT_RX_CALLBACK_ACTION_DELETE_ALL = 3
+} HAL_TAU_PKT_RX_CALLBACK_ACTION_T;
+
+typedef HAL_TAU_PKT_CALLBACK_NO_T
+(*HAL_TAU_PKT_RX_FUNC_T)(
+    const UI32_T unit,
+    const void *ptr_sw_gpd, /* SW-GPD to be processed */
+    void *ptr_cookie);      /* Private data of SDK */
+
+typedef struct HAL_TAU_PKT_RX_CALLBACK_S
+{
+    HAL_TAU_PKT_RX_FUNC_T callback; /* (unit, ptr_sw_gpd, ptr_cookie) */
+    void *ptr_cookie;
+    struct HAL_TAU_PKT_RX_CALLBACK_S *ptr_next;
+} HAL_TAU_PKT_RX_CALLBACK_T;
+
+typedef struct HAL_TAU_PKT_RX_SW_GPD_S
+{
+    BOOL_T rx_complete; /* FALSE when PDMA error occurs */
+    HAL_TAU_PKT_RX_GPD_T rx_gpd;
+    struct HAL_TAU_PKT_RX_SW_GPD_S *ptr_next;
+
+#if defined (NPS_EN_NETIF)
+    void *ptr_cookie; /* Pointer of virt-addr */
+#endif
+
+} HAL_TAU_PKT_RX_SW_GPD_T;
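An Rx packet that spans multiple GPDs arrives as a chain of HAL_TAU_PKT_RX_SW_GPD_T nodes; the total frame length is the sum of cnsm_buf_len up to the descriptor whose ch field is HAL_TAU_PKT_CH_LAST_GPD. A minimal sketch of a consumer walk (ptr_sw_first_gpd is hypothetical):

    UI32_T total_len = 0;
    HAL_TAU_PKT_RX_SW_GPD_T *ptr_sw_gpd = ptr_sw_first_gpd;

    while (NULL != ptr_sw_gpd)
    {
        total_len += ptr_sw_gpd->rx_gpd.cnsm_buf_len;
        if (HAL_TAU_PKT_CH_LAST_GPD == ptr_sw_gpd->rx_gpd.ch)
        {
            break;
        }
        ptr_sw_gpd = ptr_sw_gpd->ptr_next;
    }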
+
+typedef struct
+{
+    /* queue */
+    UI32_T enque_ok;
+    UI32_T enque_retry;
+    UI32_T deque_ok;
+    UI32_T deque_fail;
+
+    /* event */
+    UI32_T trig_event;
+
+    /* normal interrupt */
+    UI32_T rx_done;
+
+    /* abnormal interrupt */
+    UI32_T avbl_gpd_low;   /* bit-0 */
+    UI32_T avbl_gpd_empty; /* bit-1 */
+    UI32_T avbl_gpd_err;   /* bit-2 */
+    UI32_T gpd_chksm_err;  /* bit-3 */
+    UI32_T dma_read_err;   /* bit-4 */
+    UI32_T dma_write_err;  /* bit-5 */
+    UI32_T sw_issue_stop;  /* bit-6 */
+    UI32_T gpd_gt255_err;  /* bit-7 */
+    UI32_T tod_uninit;     /* bit-8 */
+    UI32_T pkt_err_drop;   /* bit-9 */
+    UI32_T udsz_drop;      /* bit-10 */
+    UI32_T ovsz_drop;      /* bit-11 */
+    UI32_T cmdq_ovf_drop;  /* bit-12 */
+    UI32_T fifo_ovf_drop;  /* bit-13 */
+
+    /* others */
+    UI32_T err_recover;
+    UI32_T ecc_err;
+
+#if defined (NPS_EN_NETIF)
+    /* incremented when the user has not created an intf on that port */
+    UI32_T netdev_miss;
+#endif
+
+
+} HAL_TAU_PKT_RX_CHANNEL_CNT_T;
+
+typedef struct
+{
+    HAL_TAU_PKT_RX_CHANNEL_CNT_T channel[HAL_TAU_PKT_RX_CHANNEL_LAST];
+    UI32_T invoke_gpd_callback;
+    UI32_T no_memory;
+
+    /* event */
+    UI32_T wait_event;
+
+} HAL_TAU_PKT_RX_CNT_T;
+
+/* ----------------------------------------------------------------------------------- Reg */
+#if defined(NPS_EN_LITTLE_ENDIAN)
+
+typedef union
+{
+    UI32_T reg;
+    struct
+    {
+        UI32_T tch_axlen_cfg : 3;
+        UI32_T : 5;
+        UI32_T tch_axi_free_arvalid : 1;
+        UI32_T : 7;
+        UI32_T tch_arvalid_thrhold_cfg : 2;
+        UI32_T : 6;
+        UI32_T tch_rready_low_4_hdr : 1;
+        UI32_T tch_ios_crdt_add_en : 1;
+        UI32_T : 6;
+    } field;
+} HAL_TAU_PKT_AXI_LEN_CFG_REG_T;
+
+typedef union
+{
+    UI32_T reg;
+    struct
+    {
+        UI32_T pdma_lbk_en : 1;
+        UI32_T : 3;
+        UI32_T pdma_lbk_plane : 2;
+        UI32_T : 2;
+        UI32_T pm_lbk_en : 1;
+        UI32_T : 7;
+        UI32_T pm_lbk_rqid : 6;
+        UI32_T : 2;
+        UI32_T : 8;
+    } field;
+} HAL_TAU_PKT_LBK_CTRL_REG_T;
+
+typedef union
+{
+    UI32_T reg;
+    struct
+    {
+        UI32_T pdma_lbk_rqid0 : 6;
+        UI32_T : 2;
+        UI32_T pdma_lbk_rqid1 : 6;
+        UI32_T : 2;
+        UI32_T pdma_lbk_rqid2 : 6;
+        UI32_T : 2;
+        UI32_T pdma_lbk_rqid3 : 6;
+        UI32_T : 2;
+    } field;
+} HAL_TAU_PKT_LBK_RQID0_3_REG_T;
+
+typedef union
+{
+    UI32_T reg;
+    struct
+    {
+        UI32_T pdma_lbk_rqid4 : 6;
+        UI32_T : 2;
+        UI32_T pdma_lbk_rqid5 : 6;
+        UI32_T : 2;
+        UI32_T pdma_lbk_rqid6 : 6;
+        UI32_T : 2;
+        UI32_T pdma_lbk_rqid7 : 6;
+        UI32_T : 2;
+    } field;
+} HAL_TAU_PKT_LBK_RQID4_7_REG_T;
+
+typedef union
+{
+    UI32_T reg;
+    struct
+    {
+        UI32_T cos_pfc_sts0 : 8;
+        UI32_T cos_pfc_sts1 : 8;
+        UI32_T cos_pfc_sts2 : 8;
+        UI32_T cos_pfc_sts3 : 8;
+    } field;
+} HAL_TAU_PKT_COS_PFC_STS_REG_T;
+
+typedef union
+{
+    UI32_T reg;
+    struct
+    {
+        UI32_T pdma_ela_en : 1;
+        UI32_T : 7;
+        UI32_T pdma_ela_valid_sel : 8;
+        UI32_T : 8;
+        UI32_T : 8;
+    } field;
+} HAL_TAU_PKT_ELA_CTRL_REG_T;
+
+typedef union
+{
+    UI32_T reg;
+    struct
+    {
+        UI32_T pdma_ela_word0_sel : 8;
+        UI32_T pdma_ela_word1_sel : 8;
+        UI32_T pdma_ela_word2_sel : 8;
+        UI32_T pdma_ela_word3_sel : 8;
+    } field;
+} HAL_TAU_PKT_ELA_SEL_REG_T;
+
+typedef union
+{
+    UI32_T reg;
+    struct
+    {
+        UI32_T ingr_pln_ios_credit_base_size_lo : 8;
+        UI32_T ingr_pln_ios_credit_base_size_hi : 8;
+        UI32_T ingr_pln_ios_credit_set : 1;
+        UI32_T : 7;
+        UI32_T : 1;
+        UI32_T ingr_pln_full_pkt_mode : 1;
+        UI32_T : 6;
+    } field;
+} HAL_TAU_PKT_IGR_PLN_CREDIT_CFG_REG_T;
+
+typedef union
+{
+    UI32_T reg;
+    struct
+    {
+        UI32_T ingr_pln_cur_ios_credit_lo : 8;
+        UI32_T ingr_pln_cur_ios_credit_hi : 8;
+        UI32_T ingr_pln_ios_credit_ovfl : 1;
+        UI32_T ingr_pln_ios_credit_udfl : 1;
+        UI32_T : 6;
+        UI32_T : 8;
+    } field;
+} HAL_TAU_PKT_IGR_PLN_CREDIT_STS_REG_T;
+
+typedef union
+{
+    UI32_T reg;
+    struct
+    {
+        UI32_T ingr_pln_ios_credit_rdy_lo_bound : 8;
+        UI32_T ingr_pln_ios_credit_rdy_hi_bound : 8;
+        UI32_T : 8;
+ UI32_T : 8; + } field; +} HAL_TAU_PKT_IGR_PLN_CREDIT_THR_REG_T; + + +typedef union +{ + UI32_T reg; + struct + { + UI32_T rch_stomp_crc_en : 1; + UI32_T : 7; + UI32_T rch_crc_regen_en : 1; + UI32_T : 7; + UI32_T rch_pfc_fun_en : 1; + UI32_T : 7; + UI32_T : 8; + } field; +} HAL_TAU_PKT_RCH_STOMP_CRC_CTRL_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T rch_ioc_en : 1; + UI32_T : 7; + UI32_T rch_chksm_en : 1; + UI32_T : 7; + UI32_T : 8; + UI32_T : 8; + } field; +} HAL_TAU_PKT_RCH_MISC_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T rch_gpd_pfc_lo : 8; + UI32_T rch_gpd_pfc_hi : 8; + UI32_T : 8; + UI32_T : 8; + } field; +} HAL_TAU_PKT_RCH_GPD_PFC_CTRL_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T rch_fifo_pfc_lo_lo : 8; + UI32_T rch_fifo_pfc_lo_hi : 3; + UI32_T : 5; + UI32_T rch_fifo_pfc_hi_lo : 8; + UI32_T rch_fifo_pfc_hi_hi : 3; + UI32_T : 5; + } field; +} HAL_TAU_PKT_RCH_FIFO_PFC_CTRL_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T rch_cmdq_pfc_lo : 5; + UI32_T : 3; + UI32_T rch_cmdq_pfc_hi : 5; + UI32_T : 3; + UI32_T : 8; + UI32_T : 8; + } field; +} HAL_TAU_PKT_RCH_CMDQ_PFC_CTRL_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T rch_start : 1; + UI32_T rch_resume : 1; + UI32_T rch_stop : 1; + UI32_T : 5; + UI32_T : 8; + UI32_T rch_gpd_add_no_lo : 8; + UI32_T rch_gpd_add_no_hi : 8; + } field; +} HAL_TAU_PKT_RCH_CMD_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T rch_fifo_ovf_drop_cnt_clr : 1; + UI32_T rch_cmdq_ovf_drop_cnt_clr : 1; + UI32_T rch_ovsz_drop_cnt_clr : 1; + UI32_T rch_udsz_drop_cnt_clr : 1; + UI32_T rch_pkterr_drop_cnt_clr : 1; + UI32_T rch_flush_cnt_clr : 1; + UI32_T : 2; + UI32_T : 8; + UI32_T : 8; + UI32_T : 8; + } field; +} HAL_TAU_PKT_RCH_CNT_CLR_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T rch_active : 1; + UI32_T rch_avbl_gpd_pfc : 1; + UI32_T rch_fifo_pfc : 1; + UI32_T rch_cmdq_pfc : 1; + UI32_T rch_pfc : 1; + UI32_T : 3; + UI32_T : 8; + UI32_T rch_avbl_gpd_no_lo : 8; + UI32_T rch_avbl_gpd_no_hi : 8; + } field; +} HAL_TAU_PKT_RCH_STATUS_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T tch_ioc_en : 1; + UI32_T tch_chksm_en : 1; + UI32_T tch_pfc_en : 1; + UI32_T tch_pktlen_chk_en : 1; + UI32_T tch_early_done_irq : 1; + UI32_T tch_chk_cos_en : 1; + UI32_T tch_adv_gpd_wrbk : 1; + UI32_T tch_gpd_wrbk_full_pkt_len : 1; + UI32_T : 8; + UI32_T : 8; + UI32_T : 8; + } field; +} HAL_TAU_PKT_TCH_CFG_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T tch_start : 1; + UI32_T tch_resume : 1; + UI32_T tch_stop : 1; + UI32_T : 5; + UI32_T : 8; + UI32_T tch_gpd_add_no_lo : 8; + UI32_T tch_gpd_add_no_hi : 8; + } field; +} HAL_TAU_PKT_TCH_CMD_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T tch_active : 1; + UI32_T tch_pfc : 1; + UI32_T tch_gpd_rd_dma_act : 1; + UI32_T : 5; + UI32_T : 8; + UI32_T tch_avbl_gpd_no : 1; + UI32_T : 7; + UI32_T : 8; + } field; +} HAL_TAU_PKT_TCH_STS_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T tch_gpd_dmar_qos : 4; + UI32_T : 4; + UI32_T tch_pkt_dmar_qos : 4; + UI32_T : 4; + UI32_T tch_gpd_dmaw_qos : 4; + UI32_T : 4; + UI32_T : 8; + } field; +} HAL_TAU_PKT_TCH_QOS_CFG_REG_T; + +#elif defined(NPS_EN_BIG_ENDIAN) + +typedef union +{ + UI32_T reg; + struct + { + UI32_T : 6; + UI32_T tch_ios_crdt_add_en : 1; + UI32_T tch_rready_low_4_hdr : 1; + UI32_T : 6; + UI32_T tch_arvalid_thrhold_cfg : 2; + UI32_T : 7; + UI32_T tch_axi_free_arvalid : 1; + UI32_T : 5; + UI32_T tch_axlen_cfg : 3; + } field; +} 
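+ /* Note: each big-endian union in this branch mirrors its little-endian twin above with the bit-fields listed in reverse order, so the same .field name selects the same bits of .reg on either host endianness. */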
HAL_TAU_PKT_AXI_LEN_CFG_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T : 8; + UI32_T : 2; + UI32_T pm_lbk_rqid : 6; + UI32_T : 7; + UI32_T pm_lbk_en : 1; + UI32_T : 2; + UI32_T pdma_lbk_plane : 2; + UI32_T : 3; + UI32_T pdma_lbk_en : 1; + } field; +} HAL_TAU_PKT_LBK_CTRL_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T : 2; + UI32_T pdma_lbk_rqid3 : 6; + UI32_T : 2; + UI32_T pdma_lbk_rqid2 : 6; + UI32_T : 2; + UI32_T pdma_lbk_rqid1 : 6; + UI32_T : 2; + UI32_T pdma_lbk_rqid0 : 6; + } field; +} HAL_TAU_PKT_LBK_RQID0_3_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T : 2; + UI32_T pdma_lbk_rqid7 : 6; + UI32_T : 2; + UI32_T pdma_lbk_rqid6 : 6; + UI32_T : 2; + UI32_T pdma_lbk_rqid5 : 6; + UI32_T : 2; + UI32_T pdma_lbk_rqid4 : 6; + } field; +} HAL_TAU_PKT_LBK_RQID4_7_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T cos_pfc_sts3 : 8; + UI32_T cos_pfc_sts2 : 8; + UI32_T cos_pfc_sts1 : 8; + UI32_T cos_pfc_sts0 : 8; + } field; +} HAL_TAU_PKT_COS_PFC_STS_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T : 8; + UI32_T : 8; + UI32_T pdma_ela_valid_sel : 8; + UI32_T : 7; + UI32_T pdma_ela_en : 1; + } field; +} HAL_TAU_PKT_ELA_CTRL_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T pdma_ela_word3_sel : 8; + UI32_T pdma_ela_word2_sel : 8; + UI32_T pdma_ela_word1_sel : 8; + UI32_T pdma_ela_word0_sel : 8; + } field; +} HAL_TAU_PKT_ELA_SEL_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T : 6; + UI32_T ingr_pln_full_pkt_mode : 1; + UI32_T : 1; + UI32_T : 7; + UI32_T ingr_pln_ios_credit_set : 1; + UI32_T ingr_pln_ios_credit_base_size_hi : 8; + UI32_T ingr_pln_ios_credit_base_size_lo : 8; + } field; +} HAL_TAU_PKT_IGR_PLN_CREDIT_CFG_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T : 8; + UI32_T : 6; + UI32_T ingr_pln_ios_credit_udfl : 1; + UI32_T ingr_pln_ios_credit_ovfl : 1; + UI32_T ingr_pln_cur_ios_credit_hi : 8; + UI32_T ingr_pln_cur_ios_credit_lo : 8; + } field; +} HAL_TAU_PKT_IGR_PLN_CREDIT_STS_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T : 8; + UI32_T : 8; + UI32_T ingr_pln_ios_credit_rdy_hi_bound : 8; + UI32_T ingr_pln_ios_credit_rdy_lo_bound : 8; + } field; +} HAL_TAU_PKT_IGR_PLN_CREDIT_THR_REG_T; + + +typedef union +{ + UI32_T reg; + struct + { + UI32_T : 8; + UI32_T : 7; + UI32_T rch_pfc_fun_en : 1; + UI32_T : 7; + UI32_T rch_crc_regen_en : 1; + UI32_T : 7; + UI32_T rch_stomp_crc_en : 1; + } field; +} HAL_TAU_PKT_RCH_STOMP_CRC_CTRL_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T : 8; + UI32_T : 8; + UI32_T : 7; + UI32_T rch_chksm_en : 1; + UI32_T : 7; + UI32_T rch_ioc_en : 1; + } field; +} HAL_TAU_PKT_RCH_MISC_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T : 8; + UI32_T : 8; + UI32_T rch_gpd_pfc_hi : 8; + UI32_T rch_gpd_pfc_lo : 8; + } field; +} HAL_TAU_PKT_RCH_GPD_PFC_CTRL_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T : 5; + UI32_T rch_fifo_pfc_hi_hi : 3; + UI32_T rch_fifo_pfc_hi_lo : 8; + UI32_T : 5; + UI32_T rch_fifo_pfc_lo_hi : 3; + UI32_T rch_fifo_pfc_lo_lo : 8; + } field; +} HAL_TAU_PKT_RCH_FIFO_PFC_CTRL_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T : 8; + UI32_T : 8; + UI32_T : 3; + UI32_T rch_cmdq_pfc_hi : 5; + UI32_T : 3; + UI32_T rch_cmdq_pfc_lo : 5; + } field; +} HAL_TAU_PKT_RCH_CMDQ_PFC_CTRL_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T rch_gpd_add_no_hi : 8; + UI32_T rch_gpd_add_no_lo : 8; + UI32_T : 8; + UI32_T : 5; + UI32_T rch_stop : 1; + UI32_T rch_resume : 1; + UI32_T 
rch_start : 1; + } field; +} HAL_TAU_PKT_RCH_CMD_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T : 8; + UI32_T : 8; + UI32_T : 8; + UI32_T : 2; + UI32_T rch_flush_cnt_clr : 1; + UI32_T rch_pkterr_drop_cnt_clr : 1; + UI32_T rch_udsz_drop_cnt_clr : 1; + UI32_T rch_ovsz_drop_cnt_clr : 1; + UI32_T rch_cmdq_ovf_drop_cnt_clr : 1; + UI32_T rch_fifo_ovf_drop_cnt_clr : 1; + } field; +} HAL_TAU_PKT_RCH_CNT_CLR_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T rch_avbl_gpd_no_hi : 8; + UI32_T rch_avbl_gpd_no_lo : 8; + UI32_T : 8; + UI32_T : 3; + UI32_T rch_pfc : 1; + UI32_T rch_cmdq_pfc : 1; + UI32_T rch_fifo_pfc : 1; + UI32_T rch_avbl_gpd_pfc : 1; + UI32_T rch_active : 1; + } field; +} HAL_TAU_PKT_RCH_STATUS_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T : 8; + UI32_T : 8; + UI32_T : 8; + UI32_T tch_gpd_wrbk_full_pkt_len : 1; + UI32_T tch_adv_gpd_wrbk : 1; + UI32_T tch_chk_cos_en : 1; + UI32_T tch_early_done_irq : 1; + UI32_T tch_pktlen_chk_en : 1; + UI32_T tch_pfc_en : 1; + UI32_T tch_chksm_en : 1; + UI32_T tch_ioc_en : 1; + } field; +} HAL_TAU_PKT_TCH_CFG_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T tch_gpd_add_no_hi : 8; + UI32_T tch_gpd_add_no_lo : 8; + UI32_T : 8; + UI32_T : 5; + UI32_T tch_stop : 1; + UI32_T tch_resume : 1; + UI32_T tch_start : 1; + } field; +} HAL_TAU_PKT_TCH_CMD_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T : 8; + UI32_T : 7; + UI32_T tch_avbl_gpd_no : 1; + UI32_T : 8; + UI32_T : 5; + UI32_T tch_gpd_rd_dma_act : 1; + UI32_T tch_pfc : 1; + UI32_T tch_active : 1; + } field; +} HAL_TAU_PKT_TCH_STS_REG_T; + +typedef union +{ + UI32_T reg; + struct + { + UI32_T : 8; + UI32_T : 4; + UI32_T tch_gpd_dmaw_qos : 4; + UI32_T : 4; + UI32_T tch_pkt_dmar_qos : 4; + UI32_T : 4; + UI32_T tch_gpd_dmar_qos : 4; + } field; +} HAL_TAU_PKT_TCH_QOS_CFG_REG_T; + +#else +#error "Host GPD endian is not defined\n" +#endif + +/* ----------------------------------------------------------------------------------- NPS_EN_NETIF */ +#if defined (NPS_EN_NETIF) +#define HAL_TAU_PKT_DRIVER_MAJOR_NUM (10) +#define HAL_TAU_PKT_DRIVER_MINOR_NUM (252) /* DO NOT use MISC_DYNAMIC_MINOR */ +#define HAL_TAU_PKT_DRIVER_NAME "nps_netif" +#define HAL_TAU_PKT_DRIVER_PATH "/dev/"HAL_TAU_PKT_DRIVER_NAME + +/* These requirements come from NPS_NETIF APIs. 
+ * nps_netif -> hal_pkt_drv -> hal_pkt_knl + */ + +typedef struct +{ + UI32_T tx_pkt; + UI32_T tx_queue_full; + UI32_T tx_error; + UI32_T rx_pkt; + +} HAL_TAU_PKT_NETIF_INTF_CNT_T; + +typedef struct +{ + /* unique key */ + UI32_T id; + C8_T name[NPS_NETIF_NAME_LEN]; + UI32_T port; /* only support unit port and local port */ + + /* metadata */ + UI8_T mac[6]; + +#define HAL_TAU_PKT_NETIF_INTF_FLAGS_MAC (1 << 0) + UI32_T flags; + + +} HAL_TAU_PKT_NETIF_INTF_T; + +typedef struct +{ + /* unique key */ + UI32_T id; + C8_T name[NPS_NETIF_NAME_LEN]; + UI32_T priority; + + /* match fields */ + UI32_T port; /* only support unit port and local port */ + HAL_TAU_PKT_RX_REASON_BITMAP_T reason_bitmap; + UI8_T pattern[NPS_NETIF_PROFILE_PATTERN_NUM][NPS_NETIF_PROFILE_PATTERN_LEN]; + UI8_T mask[NPS_NETIF_PROFILE_PATTERN_NUM][NPS_NETIF_PROFILE_PATTERN_LEN]; + UI32_T offset[NPS_NETIF_PROFILE_PATTERN_NUM]; + + /* for each flag 1:must hit, 0:don't care */ +#define HAL_TAU_PKT_NETIF_PROFILE_FLAGS_PORT (1 << 0) +#define HAL_TAU_PKT_NETIF_PROFILE_FLAGS_REASON (1 << 1) +#define HAL_TAU_PKT_NETIF_PROFILE_FLAGS_PATTERN_0 (1 << 2) +#define HAL_TAU_PKT_NETIF_PROFILE_FLAGS_PATTERN_1 (1 << 3) +#define HAL_TAU_PKT_NETIF_PROFILE_FLAGS_PATTERN_2 (1 << 4) +#define HAL_TAU_PKT_NETIF_PROFILE_FLAGS_PATTERN_3 (1 << 5) + UI32_T flags; + +} HAL_TAU_PKT_NETIF_PROFILE_T; + + +/* These requirements come from NPS_PKT APIs. + * nps_pkt -> hal_pkt_srv -> hal_pkt_drv -> hal_pkt_knl + */ +typedef enum +{ + /* network interface */ + HAL_TAU_PKT_IOCTL_TYPE_CREATE_INTF = 0, + HAL_TAU_PKT_IOCTL_TYPE_DESTROY_INTF, + HAL_TAU_PKT_IOCTL_TYPE_GET_INTF, + HAL_TAU_PKT_IOCTL_TYPE_CREATE_PROFILE, + HAL_TAU_PKT_IOCTL_TYPE_DESTROY_PROFILE, + HAL_TAU_PKT_IOCTL_TYPE_GET_PROFILE, + HAL_TAU_PKT_IOCTL_TYPE_GET_INTF_CNT, + HAL_TAU_PKT_IOCTL_TYPE_CLEAR_INTF_CNT, + /* driver */ + HAL_TAU_PKT_IOCTL_TYPE_WAIT_RX_FREE, + HAL_TAU_PKT_IOCTL_TYPE_WAIT_TX_FREE, /* waitTxFree(ASYNC) */ + HAL_TAU_PKT_IOCTL_TYPE_SET_RX_CFG, /* setRxConfig */ + HAL_TAU_PKT_IOCTL_TYPE_GET_RX_CFG, /* getRxConfig */ + HAL_TAU_PKT_IOCTL_TYPE_DEINIT_TASK, /* deinitTask */ + HAL_TAU_PKT_IOCTL_TYPE_DEINIT_DRV, /* deinitDrv */ + HAL_TAU_PKT_IOCTL_TYPE_INIT_TASK, /* initTask */ + HAL_TAU_PKT_IOCTL_TYPE_INIT_DRV, /* initDrv */ + /* counter */ + HAL_TAU_PKT_IOCTL_TYPE_GET_TX_CNT, + HAL_TAU_PKT_IOCTL_TYPE_GET_RX_CNT, + HAL_TAU_PKT_IOCTL_TYPE_CLEAR_TX_CNT, + HAL_TAU_PKT_IOCTL_TYPE_CLEAR_RX_CNT, + /* port attribute */ + HAL_TAU_PKT_IOCTL_TYPE_SET_PORT_ATTR, + HAL_TAU_PKT_IOCTL_TYPE_LAST + +} HAL_TAU_PKT_IOCTL_TYPE_T; + +typedef enum +{ + HAL_TAU_PKT_IOCTL_RX_TYPE_INIT = 0, + HAL_TAU_PKT_IOCTL_RX_TYPE_DEINIT, + HAL_TAU_PKT_IOCTL_RX_TYPE_LAST, + +} HAL_TAU_PKT_IOCTL_RX_TYPE_T; + +typedef struct +{ + UI32_T unit; + UI32_T channel; + HAL_TAU_PKT_RX_CNT_T rx_cnt; + HAL_TAU_PKT_TX_CNT_T tx_cnt; + NPS_ERROR_NO_T rc; + +} HAL_TAU_PKT_IOCTL_CH_CNT_COOKIE_T; + +typedef struct +{ + UI32_T unit; + HAL_TAU_PKT_NETIF_INTF_T net_intf; /* addIntf[In,Out], delIntf[In] */ + HAL_TAU_PKT_NETIF_PROFILE_T net_profile; /* createProfile[In,Out], destroyProfile[In] */ + HAL_TAU_PKT_NETIF_INTF_CNT_T cnt; + NPS_ERROR_NO_T rc; + +} HAL_TAU_PKT_IOCTL_NETIF_COOKIE_T; + +typedef struct +{ + NPS_ADDR_T callback; /* (unit, ptr_sw_gpd, ptr_cookie) */ + NPS_ADDR_T cookie; /* Pointer of NPS_PKT_TX_PKT_T */ + UI32_T channel; + UI32_T gpd_num; + NPS_ADDR_T hw_gpd_addr; + NPS_ADDR_T sw_gpd_addr; + +} HAL_TAU_PKT_IOCTL_TX_GPD_T; + +typedef struct +{ + UI32_T unit; + UI32_T channel; /* sendGpd[In] */ + NPS_ADDR_T ioctl_gpd_addr; /* 
sendGpd[In] */ + NPS_ADDR_T done_sw_gpd_addr; /* waitTxFree[Out] */ + +} HAL_TAU_PKT_IOCTL_TX_COOKIE_T; + +typedef struct +{ + BOOL_T rx_complete; /* FALSE when PDMA error occurs */ + NPS_ADDR_T hw_gpd_addr; /* Pointer to HW GPD in user's SW GPD struct */ + NPS_ADDR_T dma_buf_addr; /* Pointer to DMA buffer allocated by the user (virtual) */ + +} HAL_TAU_PKT_IOCTL_RX_GPD_T; + +typedef struct +{ + UI32_T unit; + UI32_T channel; /* getRxCnt[In], clearRxInt[In] */ + NPS_ADDR_T ioctl_gpd_addr; /* waitRxFree[Out] */ + UI32_T buf_len; /* setRxCfg[In] */ + HAL_TAU_PKT_IOCTL_RX_TYPE_T rx_type; /* setRxCfg[In] */ + +} HAL_TAU_PKT_IOCTL_RX_COOKIE_T; + +typedef struct +{ + UI32_T port; + UI32_T status; + UI32_T speed; + +} HAL_TAU_PKT_IOCTL_PORT_COOKIE_T; + +typedef union +{ + UI32_T value; + struct + { + UI32_T unit : 6; /* Maximum unit number is 64. */ + HAL_TAU_PKT_IOCTL_TYPE_T type : 10; /* Maximum 1024 IOCTL types */ + UI32_T rsvd : 16; + } field; + +} HAL_TAU_PKT_IOCTL_CMD_T; + +#endif /* End of NPS_EN_NETIF */ + +NPS_ERROR_NO_T +hal_tau_pkt_sendGpd( + const UI32_T unit, + const HAL_TAU_PKT_TX_CHANNEL_T channel, + HAL_TAU_PKT_TX_SW_GPD_T *ptr_sw_gpd); + +/*---------------------------------------------------------------------------*/ +/* perf */ +NPS_ERROR_NO_T +hal_tau_pkt_getTxIntrCnt( + const UI32_T unit, + const UI32_T channel, + UI32_T *ptr_intr_cnt); + +NPS_ERROR_NO_T +hal_tau_pkt_getRxIntrCnt( + const UI32_T unit, + const UI32_T channel, + UI32_T *ptr_intr_cnt); + +/* ioctl */ +NPS_ERROR_NO_T +hal_tau_pkt_getTxKnlCnt( + const UI32_T unit, + HAL_TAU_PKT_IOCTL_CH_CNT_COOKIE_T *ptr_cookie); + +NPS_ERROR_NO_T +hal_tau_pkt_getRxKnlCnt( + const UI32_T unit, + HAL_TAU_PKT_IOCTL_CH_CNT_COOKIE_T *ptr_cookie); + +NPS_ERROR_NO_T +hal_tau_pkt_clearTxKnlCnt( + const UI32_T unit, + HAL_TAU_PKT_IOCTL_TX_COOKIE_T *ptr_cookie); + +NPS_ERROR_NO_T +hal_tau_pkt_clearRxKnlCnt( + const UI32_T unit, + HAL_TAU_PKT_IOCTL_RX_COOKIE_T *ptr_cookie); + +NPS_ERROR_NO_T +hal_tau_pkt_setRxKnlConfig( + const UI32_T unit, + HAL_TAU_PKT_IOCTL_RX_COOKIE_T *ptr_cookie); + +NPS_ERROR_NO_T +hal_tau_pkt_getRxKnlConfig( + const UI32_T unit, + HAL_TAU_PKT_IOCTL_RX_COOKIE_T *ptr_cookie); + +/* perf */ +NPS_ERROR_NO_T +hal_tau_pkt_getNetDev( + const UI32_T unit, + const UI32_T port, + struct net_device **pptr_net_dev); + +NPS_ERROR_NO_T +hal_tau_pkt_prepareGpd( + const UI32_T unit, + const NPS_ADDR_T phy_addr, + const UI32_T len, + const UI32_T port, + HAL_TAU_PKT_TX_SW_GPD_T *ptr_sw_gpd); + +#endif /* end of HAL_TAU_PKT_KNL_H */ diff --git a/platform/nephos/nephos-modules/modules/src/inc/netif_osal.h b/platform/nephos/nephos-modules/modules/src/inc/netif_osal.h new file mode 100755 index 000000000000..40c8c9ebc358 --- /dev/null +++ b/platform/nephos/nephos-modules/modules/src/inc/netif_osal.h @@ -0,0 +1,379 @@ +/* Copyright (C) 2019 Nephos, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program. + */ + +/* FILE NAME: netif_osal.h + * PURPOSE: + * It provides customized Linux APIs.
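+ * EXAMPLE: + * A minimal sketch of the thread API declared below; the task name, stack size (8192) and priority (50) are illustrative values only: + * + * static void rx_task_main(void *ptr_arg) + * { + * osal_initRunThread(); + * while (NPS_E_OK == osal_isRunThread()) + * { + * (do one unit of work, or sleep) + * } + * osal_exitRunThread(); + * } + * + * NPS_THREAD_ID_T tid; + * osal_createThread("rx_task", 8192, 50, rx_task_main, NULL, &tid); + * ... + * osal_stopThread(&tid); + * osal_destroyThread(&tid);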
+ * NOTES: + */ + +#ifndef NETIF_OSAL_H +#define NETIF_OSAL_H + +/* + * ENOMEM : 12 - Out of memory + * EFAULT : 14 - Bad address + * EBUSY : 16 - Device or resource busy + * ENODEV : 19 - No such device + * EINVAL : 22 - Invalid argument + + * + * NETDEV_TX_OK : 0x00 + * NETDEV_TX_BUSY : 0x10 + + * + * ETH_HLEN : 14 dmac + smac + etyp + * ETH_ZLEN : 60 minimum ethernet frame size + * ETH_DATA_LEN : 1500 + * ETH_FRAME_LEN : 1514 + * ETH_FCS_LEN : 4 + * + * ETH_P_IP : 0x0800 + * ETH_P_ARP : 0x0806 + * ETH_P_IPV6 : 0x86DD + * ETH_P_SLOW : 0x8809 + * ETH_P_1588 : 0x88F7 + + * + * NET_IP_ALIGN : 2 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/* ----------------------------------------------------------------------------------- macro value */ +/* Thread */ +#define OSAL_THREAD_NAME_LEN (16) +#define OSAL_THREAD_DFT_NAME ("Unknown") + +/* Semaphore */ +#define OSAL_SEMA_NAME_LEN (16) +#define OSAL_SEMA_DFT_NAME ("Unknown") + +/* Event */ +#define OSAL_EVENT_NAME_LEN (16) +#define OSAL_EVENT_DFT_NAME ("Unknown") + +/* Spinlock */ +#define OSAL_SPIN_NAME_LEN (16) +#define OSAL_SPIN_DFT_NAME ("Unknown") + +/* Queue */ +#define OSAL_QUEUE_NAME_LEN (16) +#define OSAL_QUEUE_DFT_NAME ("Unknown") + +#define OSAL_PRN_BUF_SZ (256) +#define OSAL_TICKS_PER_SEC (1000000) + +/* ----------------------------------------------------------------------------------- struct */ +typedef struct linux_thread_s +{ + char name[OSAL_THREAD_NAME_LEN + 1]; + struct task_struct *ptr_task; + unsigned int is_stop; + struct linux_thread_s *ptr_prev; + struct linux_thread_s *ptr_next; + +} linux_thread_t; + +typedef struct +{ + char name[OSAL_SEMA_NAME_LEN + 1]; + struct semaphore lock; + +} linux_sema_t; + +typedef struct +{ + char name[OSAL_EVENT_NAME_LEN + 1]; + wait_queue_head_t wait_que; + unsigned int condition; + +} linux_event_t; + +typedef struct +{ + char name[OSAL_SPIN_NAME_LEN + 1]; + spinlock_t spinlock; + +} linux_isrlock_t; + +typedef struct +{ + void *ptr_data; +} linux_queue_entry_t; + +typedef struct +{ + char name[OSAL_QUEUE_NAME_LEN + 1]; + int head; /* index of the queue head entry can be read */ + int tail; /* index of the queue tail entry can be write */ + unsigned int wr_cnt; /* enqueue total count */ + unsigned int rd_cnt; /* dequeue total count */ + unsigned int capacity; /* the queue size */ + linux_queue_entry_t *ptr_entry; /* the queue entry buffer */ + +} linux_queue_t; + +typedef struct +{ + unsigned int size; + dma_addr_t phy_addr; + char data[0]; + +} linux_dma_t; + +/* ----------------------------------------------------------------------------------- function */ +void * +osal_memset( + void *ptr_mem, + const I32_T value, + const UI32_T num); + +void * +osal_memcpy( + void *ptr_dst, + const void *ptr_src, + const UI32_T num); + +UI32_T +osal_strlen( + const C8_T *ptr_str); + +void +osal_printf( + const C8_T *ptr_fmt, + ...); + +void * +osal_alloc( + const UI32_T size); + +void +osal_free( + const void *ptr_mem); + +/* thread */ +NPS_ERROR_NO_T +osal_init(void); + +NPS_ERROR_NO_T +osal_deinit(void); + +NPS_ERROR_NO_T +osal_createThread ( + const C8_T *ptr_thread_name, + const UI32_T stack_size, + const UI32_T priority, + void (function)(void*), + void *ptr_arg, + NPS_THREAD_ID_T *ptr_thread_id); + +NPS_ERROR_NO_T +osal_stopThread( + NPS_THREAD_ID_T *ptr_thread_id); + +NPS_ERROR_NO_T +osal_destroyThread( + NPS_THREAD_ID_T *ptr_thread_id); + +void +osal_initRunThread( + 
void); + +NPS_ERROR_NO_T +osal_isRunThread( + void); + +void +osal_exitRunThread( + void); + +/* semaphore */ +NPS_ERROR_NO_T +osal_createSemaphore( + const C8_T *ptr_sema_name, + const UI32_T sema_count, + NPS_SEMAPHORE_ID_T *ptr_semaphore_id); + +NPS_ERROR_NO_T +osal_takeSemaphore( + NPS_SEMAPHORE_ID_T *ptr_semaphore_id, + UI32_T time_out); + +NPS_ERROR_NO_T +osal_giveSemaphore( + NPS_SEMAPHORE_ID_T *ptr_semaphore_id); + +NPS_ERROR_NO_T +osal_destroySemaphore( + NPS_SEMAPHORE_ID_T *ptr_semaphore_id); + +/* event */ +NPS_ERROR_NO_T +osal_createEvent( + const C8_T *ptr_event_name, + NPS_SEMAPHORE_ID_T *ptr_event_id); + +NPS_ERROR_NO_T +osal_waitEvent( + NPS_SEMAPHORE_ID_T *ptr_event_id); + +NPS_ERROR_NO_T +osal_triggerEvent( + NPS_SEMAPHORE_ID_T *ptr_event_id); + +NPS_ERROR_NO_T +osal_destroyEvent( + NPS_SEMAPHORE_ID_T *ptr_event_id); + +/* isr_lock */ +NPS_ERROR_NO_T +osal_createIsrLock( + const C8_T *ptr_isrlock_name, + NPS_ISRLOCK_ID_T *ptr_isrlock_id); + +NPS_ERROR_NO_T +osal_takeIsrLock( + NPS_ISRLOCK_ID_T *ptr_isrlock_id, + NPS_IRQ_FLAGS_T *ptr_irq_flags); + +NPS_ERROR_NO_T +osal_giveIsrLock( + NPS_ISRLOCK_ID_T *ptr_isrlock_id, + NPS_IRQ_FLAGS_T *ptr_irq_flags); + +NPS_ERROR_NO_T +osal_destroyIsrLock( + NPS_ISRLOCK_ID_T *ptr_isrlock_id); + +/* timer */ +NPS_ERROR_NO_T +osal_sleepThread( + const UI32_T usecond); + +NPS_ERROR_NO_T +osal_getTime( + NPS_TIME_T *ptr_time); + +/* queue */ +NPS_ERROR_NO_T +osal_que_create( + NPS_HUGE_T *ptr_queue_id, + UI32_T capacity); + +NPS_ERROR_NO_T +osal_que_enque( + NPS_HUGE_T *ptr_queue_id, + void *ptr_data); + +NPS_ERROR_NO_T +osal_que_deque( + NPS_HUGE_T *ptr_queue_id, + void **pptr_data); + +NPS_ERROR_NO_T +osal_que_destroy( + NPS_HUGE_T *ptr_queue_id); + +NPS_ERROR_NO_T +osal_que_getCount( + NPS_HUGE_T *ptr_queue_id, + unsigned int *ptr_count); + +/* IO */ +int +osal_io_copyToUser( + void *ptr_usr_buf, + void *ptr_knl_buf, + unsigned int size); + +int +osal_io_copyFromUser( + void *ptr_knl_buf, + void *ptr_usr_buf, + unsigned int size); + +/* dma */ +void * +osal_dma_alloc( + const UI32_T size); + +NPS_ERROR_NO_T +osal_dma_free( + void *ptr_dma_mem); + +dma_addr_t +osal_dma_convertVirtToPhy( + void *ptr_virt_addr); + +void * +osal_dma_convertPhyToVirt( + const dma_addr_t phy_addr); + +int +osal_dma_flushCache( + void *ptr_virt_addr, + const unsigned int size); + +int +osal_dma_invalidateCache( + void *ptr_virt_addr, + const unsigned int size); + +/* skb */ +struct sk_buff * +osal_skb_alloc( + UI32_T size); + +void +osal_skb_free( + struct sk_buff *ptr_skb); + +dma_addr_t +osal_skb_mapDma( + struct sk_buff *ptr_skb, + enum dma_data_direction dir); + +void +osal_skb_unmapDma( + const dma_addr_t phy_addr, + UI32_T size, + enum dma_data_direction dir); + +void +osal_skb_send( + struct sk_buff *ptr_skb); + +void +osal_skb_recv( + struct sk_buff *ptr_skb); + +#endif /* end of NETIF_OSAL_H */ diff --git a/platform/nephos/nephos-modules/modules/src/inc/netif_perf.h b/platform/nephos/nephos-modules/modules/src/inc/netif_perf.h new file mode 100755 index 000000000000..35596668ba9d --- /dev/null +++ b/platform/nephos/nephos-modules/modules/src/inc/netif_perf.h @@ -0,0 +1,81 @@ +/* Copyright (C) 2019 Nephos, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program. + */ + +/* FILE NAME: netif_perf.h + * PURPOSE: + * It provides customized performance test APIs. + * NOTES: + */ + +#ifndef NETIF_PERF_H +#define NETIF_PERF_H + +/* #define PERF_EN_TEST */ + +/* FUNCTION NAME: perf_rxCallback + * PURPOSE: + * To count the Rx-gpd for the Rx-test. + * INPUT: + * len -- To check if the Rx-gpd length equals the test length. + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successful operation. + * NOTES: + * None + */ +NPS_ERROR_NO_T +perf_rxCallback( + const UI32_T len); + +/* FUNCTION NAME: perf_rxTest + * PURPOSE: + * To check whether the Rx-test is running. + * INPUT: + * None + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successful operation. + * NOTES: + * None + */ +NPS_ERROR_NO_T +perf_rxTest( + void); + +/* FUNCTION NAME: perf_test + * PURPOSE: + * To run the Tx-test or Rx-test. + * INPUT: + * len -- Test length + * tx_channel -- Test Tx channel numbers + * rx_channel -- Test Rx channel numbers + * test_skb -- Test GPD or SKB + * OUTPUT: + * None + * RETURN: + * NPS_E_OK -- Successful operation. + * NOTES: + * None + */ +NPS_ERROR_NO_T +perf_test( + UI32_T len, + UI32_T tx_channel, + UI32_T rx_channel, + BOOL_T test_skb); + +#endif /* end of NETIF_PERF_H */ diff --git a/platform/nephos/nephos-modules/modules/src/inc/nps_cfg.h b/platform/nephos/nephos-modules/modules/src/inc/nps_cfg.h new file mode 100755 index 000000000000..34306344c55a --- /dev/null +++ b/platform/nephos/nephos-modules/modules/src/inc/nps_cfg.h @@ -0,0 +1,317 @@ +/* Copyright (C) 2019 Nephos, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program. + */ + +/* FILE NAME: nps_cfg.h + * PURPOSE: + * Customer configuration on the NPS SDK. + * NOTES: + */ + +#ifndef NPS_CFG_H +#define NPS_CFG_H + +/* INCLUDE FILE DECLARATIONS + */ + +#include +#include + + +/* NAMING CONSTANT DECLARATIONS + */ + +/* MACRO FUNCTION DECLARATIONS + */ +#define NPS_CFG_MAXIMUM_CHIPS_PER_SYSTEM (16) +#define NPS_CFG_USER_PORT_NUM (96) + +/* DATA TYPE DECLARATIONS + */ +typedef enum +{ + NPS_CFG_TYPE_CHIP_MODE, /* chip operating mode. 0: legacy mode, 1: hybrid mode */ + + NPS_CFG_TYPE_PORT_MAX_SPEED, /* Reference to NPS_PORT_SPEED_XXX */ + NPS_CFG_TYPE_PORT_LANE_NUM, + NPS_CFG_TYPE_PORT_TX_LANE, + NPS_CFG_TYPE_PORT_RX_LANE, + NPS_CFG_TYPE_PORT_TX_POLARITY_REV, + NPS_CFG_TYPE_PORT_RX_POLARITY_REV, + NPS_CFG_TYPE_PORT_EXT_LANE, + NPS_CFG_TYPE_PORT_VALID, + + /* l2 module related configuration */ + NPS_CFG_TYPE_L2_THREAD_PRI, + NPS_CFG_TYPE_L2_THREAD_STACK, /* customize L2 thread stack size in bytes */ + NPS_CFG_TYPE_L2_ADDR_MODE, /* L2 address operation mode.
0: Polling mode, 1: FIFO mode */ + + /* PKT module related configuration */ + NPS_CFG_TYPE_PKT_TX_GPD_NUM, + NPS_CFG_TYPE_PKT_RX_GPD_NUM, + NPS_CFG_TYPE_PKT_RX_SCHED_MODE, /* 0: RR mode, 1: WRR mode */ + NPS_CFG_TYPE_PKT_TX_QUEUE_LEN, + NPS_CFG_TYPE_PKT_RX_QUEUE_LEN, + NPS_CFG_TYPE_PKT_RX_QUEUE_WEIGHT, /* valid while NPS_CFG_TYPE_PKT_RX_SCHED_MODE is 1 + * param0: queue + * param1: NA + * value : weight + */ + NPS_CFG_TYPE_PKT_RX_ISR_THREAD_PRI, + NPS_CFG_TYPE_PKT_RX_ISR_THREAD_STACK, /* customize PKT RX ISR thread stack size in bytes */ + NPS_CFG_TYPE_PKT_RX_FREE_THREAD_PRI, + NPS_CFG_TYPE_PKT_RX_FREE_THREAD_STACK, /* customize PKT RX free thread stack size in bytes */ + NPS_CFG_TYPE_PKT_TX_ISR_THREAD_PRI, + NPS_CFG_TYPE_PKT_TX_ISR_THREAD_STACK, /* customize PKT TX ISR thread stack size in bytes */ + NPS_CFG_TYPE_PKT_TX_FREE_THREAD_PRI, + NPS_CFG_TYPE_PKT_TX_FREE_THREAD_STACK, /* customize PKT TX free thread stack size in bytes */ + NPS_CFG_TYPE_PKT_ERROR_ISR_THREAD_PRI, + NPS_CFG_TYPE_PKT_ERROR_ISR_THREAD_STACK, /* customize PKT ERROR ISR thread stack size in bytes */ + + /* STAT module related configuration */ + NPS_CFG_TYPE_CNT_THREAD_PRI, + NPS_CFG_TYPE_CNT_THREAD_STACK, /* customize CNT thread stack size in bytes */ + + /* IFMON module related configuration */ + NPS_CFG_TYPE_IFMON_THREAD_PRI, + NPS_CFG_TYPE_IFMON_THREAD_STACK, /* customize IFMON thread stack size in bytes */ + + /* share memory related configuration */ + NPS_CFG_TYPE_SHARE_MEM_SDN_ENTRY_NUM, /* SDN flow table entry number from share memory */ + NPS_CFG_TYPE_SHARE_MEM_L3_ENTRY_NUM, /* L3 entry number from share memory */ + NPS_CFG_TYPE_SHARE_MEM_L2_ENTRY_NUM, /* L2 entry number from share memory */ + + /* DLB related configuration */ + NPS_CFG_TYPE_DLB_MONITOR_MODE, /* DLB monitor mode. 1: async, 0: sync */ + NPS_CFG_TYPE_DLB_LAG_MONITOR_THREAD_PRI, + NPS_CFG_TYPE_DLB_LAG_MONITOR_THREAD_SLEEP_TIME, + NPS_CFG_TYPE_DLB_L3_MONITOR_THREAD_PRI, + NPS_CFG_TYPE_DLB_L3_MONITOR_THREAD_SLEEP_TIME, + NPS_CFG_TYPE_DLB_L3_INTR_THREAD_PRI, + NPS_CFG_TYPE_DLB_NVO3_MONITOR_THREAD_PRI, + NPS_CFG_TYPE_DLB_NVO3_MONITOR_THREAD_SLEEP_TIME, + NPS_CFG_TYPE_DLB_NVO3_INTR_THREAD_PRI, + + /* l3 related configuration */ + NPS_CFG_TYPE_L3_ECMP_MIN_BLOCK_SIZE, + NPS_CFG_TYPE_L3_ECMP_BLOCK_SIZE, + NPS_CFG_TYPE_TCAM_L3_WITH_IPV6_PREFIX_128_REGION_ENTRY_NUM, + NPS_CFG_TYPE_TCAM_L3_WITH_IPV6_PREFIX_64_REGION_ENTRY_NUM, + + /* share memory related configuration */ + NPS_CFG_TYPE_HASH_L2_FDB_REGION_ENTRY_NUM, + NPS_CFG_TYPE_HASH_L2_GROUP_REGION_ENTRY_NUM, + NPS_CFG_TYPE_HASH_SECURITY_REGION_ENTRY_NUM, + NPS_CFG_TYPE_HASH_L3_WITH_IPV6_PREFIX_128_REGION_ENTRY_NUM, + NPS_CFG_TYPE_HASH_L3_WITH_IPV6_PREFIX_64_REGION_ENTRY_NUM, + NPS_CFG_TYPE_HASH_L3_WITHOUT_PREFIX_REGION_ENTRY_NUM, + NPS_CFG_TYPE_HASH_L3_RPF_REGION_ENTRY_NUM, + NPS_CFG_TYPE_HASH_FLOW_REGION_ENTRY_NUM, + + NPS_CFG_TYPE_PORT_FC_MODE, /* only use to init port TM buffer + * configuration for specific FC mode, + * which not enable/disable FC/PFC + * for the port/pcp. + * param0: port. + * param1: Invalid. + * value : 0, FC disable; + * 1, 802.3x FC; + * 2, PFC. + */ + NPS_CFG_TYPE_PORT_PFC_STATE, /* valid while NPS_CFG_TYPE_PORT_TYPE_FC_MODE + * of the port is PFC. + * param0: port. + * param1: pcp. + * value : 0, PFC disable; + * 1, PFC enable. + */ + NPS_CFG_TYPE_PORT_PFC_QUEUE_STATE, /* valid while NPS_CFG_TYPE_PORT_TYPE_FC_MODE + * of the port is PFC. + * param0: port. + * param1: queue. 
+ * value : 0, PFC disable; + * 1, PFC enable; + */ + NPS_CFG_TYPE_PORT_PFC_MAPPING, /* valid while NPS_CFG_TYPE_PORT_FC_MODE + * of the port is PFC. + * param0: port. + * param1: queue. + * value : PCP bitmap; + * + */ + NPS_CFG_TYPE_TRILL_ENABLE, /* TRILL module related configuration */ + NPS_CFG_TYPE_USE_UNIT_PORT, /* use UNIT_PORT or native port of NPS_PORT_T + * 1 : UNIT_PORT, 0 : native port + */ + NPS_CFG_TYPE_MAC_VLAN_ENABLE, /* use a dedicated MAC VLAN table */ + NPS_CFG_TYPE_CPI_PORT_MODE, /* used to init the CPI port working mode. + * param0: CPI port number. + * param1: NA. + * value : 0, CPI mode. + * 1, Ether mode. + */ + NPS_CFG_TYPE_PHY_ADDR, + NPS_CFG_TYPE_LED_CFG, + NPS_CFG_TYPE_USER_BUF_CTRL, + NPS_CFG_TYPE_ARIES_SDP_MODE, /* Select which Aries parser to use + * value: 0, GTP (default) + * 1, PPPOE + * 2, TCP_SPLICING + */ + NPS_CFG_TYPE_FAIR_BUF_CTRL, /* to enable fairness in flow-control traffic. + * value : 0, disable fairness. + * 1, enable fairness. + */ + NPS_CFG_TYPE_HRM_BUF_SIZE, /* to assign the headroom size per port speed. + * param0: Port speed. + * 0, 1G (default) + * 1, 10G + * 2, 25G + * 3, 40G + * 4, 50G + * 5, 100G + * value : cell number. + */ + NPS_CFG_TYPE_STEERING_TRUNCATE_ENABLE, /* set value 0: Do not truncate steering packets. + * set value 1: steering packets will be truncated to 1 cell, and + * the cell size depends on the chip. + */ + NPS_CFG_TYPE_FABRIC_MODE_ENABLE, /* set value 0: Non-fabric chip mode. (default) + * set value 1: Fabric chip mode. + */ + NPS_CFG_TYPE_ACL_TCP_FLAGS_ENCODE_ENABLE, /* set value 0: Do not encode tcp flags in the acl entry. + * (Can only match bits 0-6 of the tcp flags.) + * set value 1: Encode tcp flags in the acl entry. (default) + */ + NPS_CFG_TYPE_TCAM_ECC_SCAN_ENABLE, /* set value 0: Disable ECC TCAM scanning. (default) + * set value 1: Enable ECC TCAM scanning. + */ + NPS_CFG_TYPE_PORT_BUF_MAX, /* + * Port max buffer threshold; the unit is cell count. + * param0: port. + * param1: 0, ingress; + * 1, egress. + * value : 0, disable; + * others, enable max threshold. + */ + NPS_CFG_TYPE_INGRESS_DYNAMIC_BUF, /* + * Queue dynamic alpha setting; the value is scaled + * up by 256. For example, set the value + * to 16 to indicate an alpha of 1/16, or + * to 256 to indicate an alpha of 1. + * param0: port. + * param1: queue (0~7: sc). + * value : alpha * 256. + */ + NPS_CFG_TYPE_EGRESS_DYNAMIC_BUF, /* + * Queue dynamic alpha setting; the value is scaled + * up by 256. For example, set the value + * to 16 to indicate an alpha of 1/16, or + * to 256 to indicate an alpha of 1. + * param0: port. + * param1: queue (0~7: uc, 8~15: mc). + * value : alpha * 256. + */ + + + NPS_CFG_TYPE_DCQCN_ENABLE, /* set value 0: Disable DCQCN. (default) + * set value 1: Enable DCQCN.
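+ * + * A board-layer callback registered with nps_cfg_register() (declared near the end of this header) typically serves these queries from a switch statement; a minimal sketch with illustrative values: + * + * static NPS_ERROR_NO_T + * _board_cfg_get(const UI32_T unit, const NPS_CFG_TYPE_T cfg_type, NPS_CFG_VALUE_T *ptr_cfg_value) + * { + * switch (cfg_type) + * { + * case NPS_CFG_TYPE_PKT_RX_GPD_NUM: + * ptr_cfg_value->value = 1024; + * return (NPS_E_OK); + * default: + * return (NPS_E_NOT_SUPPORT); + * } + * } + * + * Returning an error for an unhandled type lets the SDK fall back to its built-in default for that item.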
+ */ + + + NPS_CFG_TYPE_LAST + +}NPS_CFG_TYPE_T; + +typedef struct NPS_CFG_VALUE_S +{ + UI32_T param0; /* (Optional) The parameter which is used + * when the NPS_CFG_TYPE_T needs a first argument */ + UI32_T param1; /* (Optional) The parameter which is used + * when the NPS_CFG_TYPE_T needs a second argument */ + I32_T value; + +}NPS_CFG_VALUE_T; + +typedef NPS_ERROR_NO_T + (*NPS_CFG_GET_FUNC_T)( + const UI32_T unit, + const NPS_CFG_TYPE_T cfg_type, + NPS_CFG_VALUE_T *ptr_cfg_value); + +typedef NPS_ERROR_NO_T + (*NPS_CFG_GET_LED_FUNC_T) +( + const UI32_T unit, + UI32_T **pptr_led_cfg, + UI32_T *ptr_cfg_size); + +/* EXPORTED SUBPROGRAM SPECIFICATIONS + */ + +/* FUNCTION NAME: nps_cfg_register + * PURPOSE: + * The function registers an NPS_CFG_GET_FUNC_T with the SDK. + * + * INPUT: + * unit -- Device unit number. + * ptr_cfg_callback -- function to get the configuration value. + * + * OUTPUT: + * None + * + * RETURN: + * NPS_E_OK -- Operation success. + * NPS_E_BAD_PARAMETER -- Bad parameter. + * + * NOTES: + * 1. During SDK initialization, the SDK calls the registered NPS_CFG_GET_FUNC_T to get configurations + * and apply them. + * If no NPS_CFG_GET_FUNC_T is registered, or a specified NPS_CFG_TYPE_T + * configuration cannot be obtained, the SDK applies its default setting. + * 2. This function should be called before calling nps_init. + */ +NPS_ERROR_NO_T +nps_cfg_register( + const UI32_T unit, + NPS_CFG_GET_FUNC_T ptr_cfg_callback); + +/* FUNCTION NAME: nps_cfg_led_register + * PURPOSE: + * The function registers an NPS_CFG_GET_LED_FUNC_T with the SDK. + * + * INPUT: + * unit -- Device unit number. + * ptr_led_cfg_callback -- function to get the LED configuration array. + * + * OUTPUT: + * None + * + * RETURN: + * NPS_E_OK -- Operation success. + * NPS_E_BAD_PARAMETER -- Bad parameter. + * + * NOTES: + * 1. During SDK initialization, the SDK calls the registered NPS_CFG_GET_LED_FUNC_T to get the configuration + * and apply it. + * If no NPS_CFG_GET_LED_FUNC_T is registered, or the specified external LED cfg + * configuration cannot be obtained, the SDK applies its default setting. + * 2. This function should be called before calling nps_init. + */ +NPS_ERROR_NO_T +nps_cfg_led_register( + const UI32_T unit, + NPS_CFG_GET_LED_FUNC_T ptr_led_cfg_callback); + +#endif /* NPS_CFG_H */ diff --git a/platform/nephos/nephos-modules/modules/src/inc/nps_error.h b/platform/nephos/nephos-modules/modules/src/inc/nps_error.h new file mode 100755 index 000000000000..a31ed4ac84b7 --- /dev/null +++ b/platform/nephos/nephos-modules/modules/src/inc/nps_error.h @@ -0,0 +1,76 @@ +/* Copyright (C) 2019 Nephos, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program. + */ + +/* FILE NAME: nps_error.h + * PURPOSE: + * Define the generic error codes of the NPS SDK.
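+ * EXAMPLE: + * The usual call-site idiom for these codes (illustrative): + * + * NPS_ERROR_NO_T rc = nps_cfg_register(unit, _board_cfg_get); + * if (NPS_E_OK != rc) + * { + * osal_printf("cfg register failed: %s\n", nps_error_getString(rc)); + * }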
+ * NOTES: + */ + +#ifndef NPS_ERROR_H +#define NPS_ERROR_H + +/* INCLUDE FILE DECLARATIONS + */ +#include + + +/* NAMING CONSTANT DECLARATIONS + */ + +/* MACRO FUNCTION DECLARATIONS + */ + +/* DATA TYPE DECLARATIONS + */ + +typedef enum +{ + NPS_E_OK = 0, /* Ok and no error */ + NPS_E_BAD_PARAMETER, /* Parameter is wrong */ + NPS_E_NO_MEMORY, /* No memory is available */ + NPS_E_TABLE_FULL, /* Table is full */ + NPS_E_ENTRY_NOT_FOUND, /* Entry is not found */ + NPS_E_ENTRY_EXISTS, /* Entry already exists */ + NPS_E_NOT_SUPPORT, /* Feature is not supported */ + NPS_E_ALREADY_INITED, /* Module is reinitialized */ + NPS_E_NOT_INITED, /* Module is not initialized */ + NPS_E_OTHERS, /* Other errors */ + NPS_E_ENTRY_IN_USE, /* Entry is in use */ + NPS_E_LAST +} NPS_ERROR_NO_T; + +/* EXPORTED SUBPROGRAM SPECIFICATIONS + */ +/* FUNCTION NAME: nps_error_getString + * PURPOSE: + * To obtain the error string of the specified error code + * + * INPUT: + * cause -- The specified error code + * OUTPUT: + * None + * RETURN: + * Pointer to the target error string + * + * NOTES: + * + * + */ +C8_T * +nps_error_getString( + const NPS_ERROR_NO_T cause ); + +#endif /* NPS_ERROR_H */ diff --git a/platform/nephos/nephos-modules/modules/src/inc/nps_types.h b/platform/nephos/nephos-modules/modules/src/inc/nps_types.h new file mode 100755 index 000000000000..5630b521404e --- /dev/null +++ b/platform/nephos/nephos-modules/modules/src/inc/nps_types.h @@ -0,0 +1,308 @@ +/* Copyright (C) 2019 Nephos, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program. + */ + +/* FILE NAME: nps_types.h + * PURPOSE: + * Define the common data types in the NPS SDK. + * NOTES: + */ + +#ifndef NPS_TYPES_H +#define NPS_TYPES_H + +/* INCLUDE FILE DECLARATIONS + */ + +#include + +/* NAMING CONSTANT DECLARATIONS + */ + +#define NPS_BIT_OFF 0 +#define NPS_BIT_ON 1 + +#define NPS_PORT_INVALID (0xFFFFFFFF) +#define NPS_SEG_INVALID (0xFFFFFFFF) + +/* for a CPU Rx packet, indicates that the packet + * was not received from a remote switch + */ +#define NPS_PATH_INVALID (0xFFFFFFFF) + + +#define NPS_SEMAPHORE_BINARY (1) +#define NPS_SEMAPHORE_SYNC (0) +#define NPS_SEMAPHORE_WAIT_FOREVER (0xFFFFFFFF) + +/* MACRO FUNCTION DECLARATIONS + */ +#if defined(NPS_EN_HOST_32_BIT_BIG_ENDIAN) || defined(NPS_EN_HOST_32_BIT_LITTLE_ENDIAN) +typedef unsigned int NPS_HUGE_T; +#elif defined(NPS_EN_HOST_64_BIT_BIG_ENDIAN) || defined(NPS_EN_HOST_64_BIT_LITTLE_ENDIAN) +typedef unsigned long long int NPS_HUGE_T; +#else +#error "The 32bit and 64bit compatible data type are not defined !!"
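+/* For reference, the address helpers defined just below combine and split 32-bit halves; e.g. on a 64-bit host (illustration only): + * + * NPS_ADDR_T addr = NPS_ADDR_32_TO_64(0x00000001, 0x23456789); + * NPS_ADDR_64_HI(addr) evaluates to 0x00000001 + * NPS_ADDR_64_LOW(addr) evaluates to 0x23456789 + * + * whereas a 32-bit build without NPS_EN_64BIT_ADDR keeps only the low word and NPS_ADDR_64_HI() is always 0. + */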
+#endif + +#if defined(NPS_EN_64BIT_ADDR) +typedef unsigned long long int NPS_ADDR_T; +#else +typedef NPS_HUGE_T NPS_ADDR_T; +#endif + +#if defined(NPS_EN_HOST_64_BIT_BIG_ENDIAN) || defined(NPS_EN_HOST_64_BIT_LITTLE_ENDIAN) || defined(NPS_EN_64BIT_ADDR) +#define NPS_ADDR_64_HI(__addr__) ((__addr__) >> 32) +#define NPS_ADDR_64_LOW(__addr__) ((__addr__) & 0xFFFFFFFF) +#define NPS_ADDR_32_TO_64(__hi32__,__low32__) (((unsigned long long int)(__low32__)) | \ + (((unsigned long long int)(__hi32__)) << 32)) +#else +#define NPS_ADDR_64_HI(__addr__) (0) +#define NPS_ADDR_64_LOW(__addr__) (__addr__) +#define NPS_ADDR_32_TO_64(__hi32__,__low32__) (__low32__) +#endif + +#define NPS_BITMAP_SIZE(bit_num) ((((bit_num) - 1) / 32) + 1) +#define NPS_IPV4_IS_MULTICAST(addr) (0xE0000000 == ((addr) & 0xF0000000)) +#define NPS_IPV6_IS_MULTICAST(addr) (0xFF == (((UI8_T *)(addr))[0])) +#define NPS_MAC_IS_MULTICAST(mac) ((mac[0]) & (0x1)) + +/* DATA TYPE DECLARATIONS + */ +typedef UI8_T NPS_BIT_MASK_8_T; +typedef UI16_T NPS_BIT_MASK_16_T; +typedef UI32_T NPS_BIT_MASK_32_T; +typedef UI64_T NPS_BIT_MASK_64_T; + +typedef UI8_T NPS_MAC_T[6]; +typedef UI32_T NPS_IPV4_T; +typedef UI8_T NPS_IPV6_T[16]; + +typedef UI32_T NPS_TIME_T; + +/* Bridge Domain id data type. */ +typedef UI32_T NPS_BRIDGE_DOMAIN_T; + +/* TRILL nickname type. */ +typedef UI16_T NPS_TRILL_NICKNAME_T; + +typedef union NPS_IP_U +{ + + NPS_IPV4_T ipv4_addr; + NPS_IPV6_T ipv6_addr; + +}NPS_IP_T; + +typedef struct NPS_IP_ADDR_S +{ + NPS_IP_T ip_addr; + BOOL_T ipv4 ; +}NPS_IP_ADDR_T; + +/* Tunnel type*/ +typedef enum +{ + NPS_TUNNEL_TYPE_IPV4INIPV4 = 0, /* RFC2003, IPv4-in-IPv4 tunnel */ + NPS_TUNNEL_TYPE_IPV4INIPV6, /* RFC2003, IPv4-in-IPv6 tunnel */ + NPS_TUNNEL_TYPE_IPV6INIPV4, /* RFC2003, IPv6-in-IPv4 tunnel */ + NPS_TUNNEL_TYPE_IPV6INIPV6, /* RFC2003, IPv6-in-IPv6 tunnel */ + NPS_TUNNEL_TYPE_GREIPV4INIPV4, /* RFC2784/RFC2890,GRE IPv4-in-IPv4 tunnel */ + NPS_TUNNEL_TYPE_GREIPV6INIPV4, /* RFC2784/RFC2890,GRE IPv6-in-IPv4 tunnel */ + NPS_TUNNEL_TYPE_GREIPV4INIPV6, /* RFC2784/RFC2890,GRE IPv4-in-IPv6 tunnel */ + NPS_TUNNEL_TYPE_GREIPV6INIPV6, /* RFC2784/RFC2890,GRE IPv6-in-IPv6 tunnel */ + NPS_TUNNEL_TYPE_GRE_NSH, + NPS_TUNNEL_TYPE_6TO4, /* RFC3056, 6to4 tunnel*/ + NPS_TUNNEL_TYPE_ISATAP, /* RFC5214, ISATAP tunnel */ + NPS_TUNNEL_TYPE_NVGRE_L2, + NPS_TUNNEL_TYPE_NVGRE_V4, + NPS_TUNNEL_TYPE_NVGRE_V6, + NPS_TUNNEL_TYPE_NVGRE_NSH, + NPS_TUNNEL_TYPE_VXLAN, + NPS_TUNNEL_TYPE_GTP_V4, + NPS_TUNNEL_TYPE_GTP_V6, + NPS_TUNNEL_TYPE_MPLSINGRE, + NPS_TUNNEL_TYPE_VXLANGPE_L2, + NPS_TUNNEL_TYPE_VXLANGPE_V4, + NPS_TUNNEL_TYPE_VXLANGPE_V6, + NPS_TUNNEL_TYPE_VXLANGPE_NSH, + NPS_TUNNEL_TYPE_FLEX0_L2, + NPS_TUNNEL_TYPE_FLEX0_V4, + NPS_TUNNEL_TYPE_FLEX0_V6, + NPS_TUNNEL_TYPE_FLEX0_NSH, + NPS_TUNNEL_TYPE_FLEX1_L2, + NPS_TUNNEL_TYPE_FLEX1_V4, + NPS_TUNNEL_TYPE_FLEX1_V6, + NPS_TUNNEL_TYPE_FLEX1_NSH, + NPS_TUNNEL_TYPE_FLEX2_L2, + NPS_TUNNEL_TYPE_FLEX2_V4, + NPS_TUNNEL_TYPE_FLEX2_V6, + NPS_TUNNEL_TYPE_FLEX2_NSH, + NPS_TUNNEL_TYPE_FLEX3_L2, + NPS_TUNNEL_TYPE_FLEX3_V4, + NPS_TUNNEL_TYPE_FLEX3_V6, + NPS_TUNNEL_TYPE_FLEX3_NSH, + NPS_TUNNEL_TYPE_LAST +} NPS_TUNNEL_TYPE_T; + +/* tunnel key */ +typedef struct NPS_TUNNEL_KEY_S +{ + NPS_IP_ADDR_T src_ip; /* key: The outer source IP address used by tunnel encapsulation.*/ + NPS_IP_ADDR_T dst_ip; /* key: The outer destination IP address used by tunnel encapsulation. + * For automatic tunnel, this is not required. If not specified, + * its ip address value must be set to 0, but the IP version + * must be same with src_ip. 
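+ * (For example, an automatic tunnel such as NPS_TUNNEL_TYPE_6TO4 derives the outer IPv4 destination from the embedded 2002::/16 address, so dst_ip stays 0 here.)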
+ */ + NPS_TUNNEL_TYPE_T tunnel_type; /*key: The tunnel type.*/ +}NPS_TUNNEL_KEY_T; + +typedef UI16_T NPS_VLAN_T; +typedef UI32_T NPS_PORT_T; + +typedef enum{ + NPS_PORT_TYPE_NORMAL = 0, + NPS_PORT_TYPE_UNIT_PORT, + NPS_PORT_TYPE_LAG, + NPS_PORT_TYPE_VM_ETAG, + NPS_PORT_TYPE_VM_VNTAG, + NPS_PORT_TYPE_VM_VEPA, + NPS_PORT_TYPE_FCOE, + NPS_PORT_TYPE_IP_TUNNEL, + NPS_PORT_TYPE_TRILL, + NPS_PORT_TYPE_MPLS, + NPS_PORT_TYPE_MPLS_PW, + NPS_PORT_TYPE_CPU_PORT, + NPS_PORT_TYPE_SFC, + NPS_PORT_TYPE_LAST +}NPS_PORT_TYPE_T; + +/*support Green/Yellow/Red color*/ +typedef enum +{ + NPS_COLOR_GREEN = 0, + NPS_COLOR_YELLOW, + NPS_COLOR_RED, + NPS_COLOR_LAST +}NPS_COLOR_T; +typedef enum +{ + NPS_FWD_ACTION_FLOOD = 0, + NPS_FWD_ACTION_NORMAL, + NPS_FWD_ACTION_DROP, + NPS_FWD_ACTION_COPY_TO_CPU, + NPS_FWD_ACTION_REDIRECT_TO_CPU, + NPS_FWD_ACTION_FLOOD_COPY_TO_CPU, + NPS_FWD_ACTION_DROP_COPY_TO_CPU, + NPS_FWD_ACTION_LAST +} NPS_FWD_ACTION_T; + +typedef NPS_HUGE_T NPS_THREAD_ID_T; +typedef NPS_HUGE_T NPS_SEMAPHORE_ID_T; +typedef NPS_HUGE_T NPS_ISRLOCK_ID_T; +typedef NPS_HUGE_T NPS_IRQ_FLAGS_T; + +typedef enum +{ + NPS_DIR_INGRESS = 0, + NPS_DIR_EGRESS, + NPS_DIR_BOTH, + NPS_DIR_LAST +}NPS_DIR_T; + +typedef enum +{ + NPS_VLAN_ACTION_SET, + NPS_VLAN_ACTION_KEEP, + NPS_VLAN_ACTION_REMOVE, + NPS_VLAN_ACTION_LAST +} NPS_VLAN_ACTION_T; + +/* VLAN Precedence */ +/* 000 = SUBNET_PROTOCOL_MAC_PORT + * 001 = SUBNET_MAC_PROTOCOL_PORT + * 010 = PROTOCOL_SUBNET_MAC_PORT + * 011 = PROTOCOL_MAC_SUBNET_PORT + * 100 = MAC_SUBNET_PROTOCOL_PORT + * 101 = MAC_PROTOCOL_SUBNET_PORT + */ +typedef enum +{ + NPS_VLAN_PRECEDENCE_SUBNET_MAC_PROTOCOL_PORT = 1, + NPS_VLAN_PRECEDENCE_MAC_SUBNET_PROTOCOL_PORT = 4, + NPS_VLAN_PRECEDENCE_PORT_ONLY = 7, + NPS_VLAN_PRECEDENCE_FAVOR_TYPE = 8, + NPS_VLAN_PRECEDENCE_FAVOR_ADDR = 9, + NPS_VLAN_PRECEDENCE_LAST +} NPS_VLAN_PRECEDENCE_T; + +/* VLAN Tag Type */ +typedef enum +{ + NPS_VLAN_TAG_NONE = 0, /* UnTag */ + NPS_VLAN_TAG_SINGLE_PRI, /* Single Customer/Service Priority Tag */ + NPS_VLAN_TAG_SINGLE, /* Single Customer/Service Tag */ + NPS_VLAN_TAG_DOUBLE_PRI, /* Double Tag with any VID=0 */ + NPS_VLAN_TAG_DOUBLE, /* Double Tag */ + NPS_VLAN_TAG_LAST +} NPS_VLAN_TAG_T; + +typedef struct NPS_BUM_INFO_S +{ + UI32_T mcast_id; + UI32_T group_label; /* l2 da group label */ + UI32_T vid; /* used when FLAGS_ADD_VID is set */ + +#define NPS_BUM_INFO_FLAGS_MCAST_VALID (1 << 0) +#define NPS_BUM_INFO_FLAGS_TO_CPU (1 << 1) +#define NPS_BUM_INFO_FLAGS_ADD_VID (1 << 2) /* single tag to double tag (i.e) QinQ */ +#define NPS_BUM_INFO_FLAGS_TRILL_ALL_TREE (1 << 3) + UI32_T flags; +} NPS_BUM_INFO_T; + +typedef enum +{ + NPS_PHY_TYPE_INTERNAL = 0x0, + NPS_PHY_TYPE_EXTERNAL, + NPS_PHY_TYPE_LAST +} NPS_PHY_TYPE_T; + +typedef enum +{ + NPS_PHY_DEVICE_ADDR_PMA_PMD = 1, + NPS_PHY_DEVICE_ADDR_WIS = 2, + NPS_PHY_DEVICE_ADDR_PCS = 3, + NPS_PHY_DEVICE_ADDR_PHY_XS = 4, + NPS_PHY_DEVICE_ADDR_DTE_XS = 5, + NPS_PHY_DEVICE_ADDR_TC = 6, + NPS_PHY_DEVICE_ADDR_AN = 7, + NPS_PHY_DEVICE_ADDR_VENDOR_1 = 30, + NPS_PHY_DEVICE_ADDR_VENDOR_2 = 31, + NPS_PHY_DEVICE_ADDR_LAST +} NPS_PHY_DEVICE_ADDR_T; + +typedef struct NPS_RANGE_INFO_S +{ + UI32_T min_id; + UI32_T max_id; + UI32_T max_member_cnt; + +#define NPS_RANGE_INFO_FLAGS_MAX_MEMBER_CNT (1 << 0) + UI32_T flags; +} NPS_RANGE_INFO_T; + +/* EXPORTED SUBPROGRAM SPECIFICATIONS + */ + +#endif /* NPS_TYPES_H */ diff --git a/platform/nephos/nephos-modules/modules/src/inc/osal_mdc.h b/platform/nephos/nephos-modules/modules/src/inc/osal_mdc.h new file mode 100755 index 000000000000..47971bb38c8d 
--- /dev/null +++ b/platform/nephos/nephos-modules/modules/src/inc/osal_mdc.h @@ -0,0 +1,241 @@ +/* Copyright (C) 2019 Nephos, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program. + */ + +/* FILE NAME: osal_mdc.h + * PURPOSE: + * 1. Provide device operations for the AML interface + * NOTES: + * + */ + +#ifndef OSAL_MDC_H +#define OSAL_MDC_H + +/* INCLUDE FILE DECLARATIONS */ +#include +#include + +#define OSAL_MDC_DRIVER_NAME "nps_dev" +#define OSAL_MDC_DRIVER_MISC_MAJOR_NUM (10) +#define OSAL_MDC_DRIVER_MISC_MINOR_NUM (250) +#define OSAL_MDC_PCI_BUS_WIDTH (4) + +#define OSAL_MDC_DMA_LIST_SZ_UNLIMITED (0) +#define OSAL_MDC_DMA_LIST_NAME "RSRV_DMA" +#define OSAL_MDC_DMA_SEMAPHORE_NAME "DMALIST" + +/* NAMING CONSTANT DECLARATIONS + */ + +/* linked list node */ +#if defined(NPS_LINUX_KERNEL_MODE) + +typedef struct OSAL_MDC_LIST_NODE_S +{ + void *ptr_data; /* node data */ + struct OSAL_MDC_LIST_NODE_S *ptr_next; /* points to the next node */ + struct OSAL_MDC_LIST_NODE_S *ptr_prev; /* points to the previous node */ +} OSAL_MDC_LIST_NODE_T; + +/* linked list head */ +typedef struct OSAL_MDC_LIST_S +{ + OSAL_MDC_LIST_NODE_T *ptr_head_node; /* linked list head node */ + OSAL_MDC_LIST_NODE_T *ptr_tail_node; /* linked list tail node */ + UI32_T capacity; /* max count of nodes in the list + * size=0: the capacity is unlimited. + * size>0: the capacity is limited.
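+ * (This list is presumably what ptr_dma_list in OSAL_MDC_DMA_INFO_T points at: each node would track one allocated DMA block so that osal_mdc_convertVirtToPhy()/osal_mdc_convertPhyToVirt() can translate addresses by lookup.)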
+ */ + UI32_T node_cnt; /* the count of nodes in the list */ +} OSAL_MDC_LIST_T; + +#endif /* End of defined(NPS_LINUX_KERNEL_MODE) */ + +typedef struct +{ + NPS_ADDR_T phy_addr; + void *ptr_virt_addr; + NPS_ADDR_T size; + +#if defined(NPS_EN_DMA_RESERVED) + BOOL_T available; +#endif + +} OSAL_MDC_DMA_NODE_T; + +typedef struct +{ +#if defined(NPS_EN_DMA_RESERVED) + void *ptr_rsrv_virt_addr; + NPS_ADDR_T rsrv_phy_addr; + NPS_ADDR_T rsrv_size; +#else + struct device *ptr_dma_dev; /* for allocate/free system memory */ +#endif + void *ptr_dma_list; /* the type should be casted again when use */ + NPS_SEMAPHORE_ID_T sema; + +} OSAL_MDC_DMA_INFO_T; + +#if defined(NPS_LINUX_USER_MODE) + +/* Data type of IOCTL argument for DMA management */ +typedef struct +{ +#if defined(NPS_EN_DMA_RESERVED) + NPS_ADDR_T rsrv_dma_phy_addr; /* information of reserved memory */ + NPS_ADDR_T rsrv_dma_size; +#else + NPS_ADDR_T phy_addr; /* information of system memory */ + NPS_ADDR_T size; +#endif +} OSAL_MDC_IOCTL_DMA_DATA_T; + +/* Data type of IOCTL argument for device initialization */ +#pragma pack (push,1) +typedef struct +{ + AML_DEV_ID_T id[NPS_CFG_MAXIMUM_CHIPS_PER_SYSTEM]; + NPS_ADDR_T pci_mmio_phy_start[NPS_CFG_MAXIMUM_CHIPS_PER_SYSTEM]; + NPS_ADDR_T pci_mmio_size[NPS_CFG_MAXIMUM_CHIPS_PER_SYSTEM]; + UI32_T dev_num; +} OSAL_MDC_IOCTL_DEV_DATA_T; +#pragma pack (pop) + +typedef enum +{ + OSAL_MDC_IOCTL_ACCESS_READ = 0, + OSAL_MDC_IOCTL_ACCESS_WRITE, + OSAL_MDC_IOCTL_ACCESS_READ_WRITE, + OSAL_MDC_IOCTL_ACCESS_NONE, + OSAL_MDC_IOCTL_ACCESS_LAST + +} OSAL_MDC_IOCTL_ACCESS_T; + +typedef enum +{ + OSAL_MDC_IOCTL_TYPE_MDC_INIT_DEV = 0, + OSAL_MDC_IOCTL_TYPE_MDC_DEINIT_DEV, + OSAL_MDC_IOCTL_TYPE_MDC_INIT_RSRV_DMA_MEM, + OSAL_MDC_IOCTL_TYPE_MDC_DEINIT_RSRV_DMA_MEM, + OSAL_MDC_IOCTL_TYPE_MDC_ALLOC_SYS_DMA_MEM, + OSAL_MDC_IOCTL_TYPE_MDC_FREE_SYS_DMA_MEM, + OSAL_MDC_IOCTL_TYPE_MDC_CONNECT_ISR, + OSAL_MDC_IOCTL_TYPE_MDC_DISCONNECT_ISR, + OSAL_MDC_IOCTL_TYPE_LAST + +} OSAL_MDC_IOCTL_TYPE_T; + +typedef union +{ + UI32_T value; + struct + { + UI32_T access : 2; /* 0:read, 1:write, 2:read and write, 3:none */ + UI32_T unit : 6; /* Maximum unit number is 64. */ + UI32_T size :14; /* Maximum IOCTL data size is 16KB. 
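+ * (2^14 bytes). A command word is built by filling the bit-fields; a hypothetical device-init request could look like: + * OSAL_MDC_IOCTL_CMD_T cmd; + * cmd.field.access = OSAL_MDC_IOCTL_ACCESS_READ; + * cmd.field.unit = 0; + * cmd.field.size = sizeof(OSAL_MDC_IOCTL_DEV_DATA_T); + * cmd.field.type = OSAL_MDC_IOCTL_TYPE_MDC_INIT_DEV; + * and cmd.value would then travel as the ioctl() request argument.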
*/ + UI32_T type :10; /* Maximum 1024 IOCTL types */ + } field; +} OSAL_MDC_IOCTL_CMD_T; + +typedef NPS_ERROR_NO_T +(*OSAL_MDC_IOCTL_CALLBACK_FUNC_T)( + const UI32_T unit, + void *ptr_data); + +#endif /* End of NPS_LINUX_USER_MODE */ + + +/* MACRO FUNCTION DECLARATIONS + */ + +/* DATA TYPE DECLARATIONS + */ + +/* EXPORTED SUBPROGRAM SPECIFICATIONS + */ +NPS_ERROR_NO_T +osal_mdc_readPciReg( + const UI32_T unit, + const UI32_T offset, + UI32_T *ptr_data, + const UI32_T len); + +NPS_ERROR_NO_T +osal_mdc_writePciReg( + const UI32_T unit, + const UI32_T offset, + const UI32_T *ptr_data, + const UI32_T len); + +NPS_ERROR_NO_T +osal_mdc_initDevice( + AML_DEV_T *ptr_dev_list, + UI32_T *ptr_dev_num); + +NPS_ERROR_NO_T +osal_mdc_deinitDevice(void); + +NPS_ERROR_NO_T +osal_mdc_initDmaMem(void); + +NPS_ERROR_NO_T +osal_mdc_deinitDmaMem(void); + +void * +osal_mdc_allocDmaMem( + const UI32_T size); + +NPS_ERROR_NO_T +osal_mdc_freeDmaMem( + void *ptr_virt_addr); + +NPS_ERROR_NO_T +osal_mdc_convertVirtToPhy( + void *ptr_virt_addr, + NPS_ADDR_T *ptr_phy_addr); + +NPS_ERROR_NO_T +osal_mdc_convertPhyToVirt( + const NPS_ADDR_T phy_addr, + void **pptr_virt_addr); + +NPS_ERROR_NO_T +osal_mdc_registerIsr( + const UI32_T unit, + AML_DEV_ISR_FUNC_T handler, + void *ptr_cookie); + +NPS_ERROR_NO_T +osal_mdc_connectIsr( + const UI32_T unit, + AML_DEV_ISR_FUNC_T handler, + AML_DEV_ISR_DATA_T *ptr_cookie); + +NPS_ERROR_NO_T +osal_mdc_disconnectIsr( + const UI32_T unit); + +NPS_ERROR_NO_T +osal_mdc_flushCache( + void *ptr_virt_addr, + const UI32_T size); + +NPS_ERROR_NO_T +osal_mdc_invalidateCache( + void *ptr_virt_addr, + const UI32_T size); + +#endif /* OSAL_MDC_H */ diff --git a/platform/nephos/nephos-modules/modules/src/inc/osal_types.h b/platform/nephos/nephos-modules/modules/src/inc/osal_types.h new file mode 100755 index 000000000000..48ac58aba335 --- /dev/null +++ b/platform/nephos/nephos-modules/modules/src/inc/osal_types.h @@ -0,0 +1,319 @@ +/* Copyright (C) 2019 Nephos, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program. + */ + +/* FILE NAME: osal_types.h + * PURPOSE: + * Define the common data types in the NPS SDK.
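+ * EXAMPLE: + * When the compiler lacks native long long, UI64_T is emulated by the macros below; a 64-bit byte counter then reads (sketch): + * + * UI64_T octets; + * UI64_ASSIGN(octets, 0, 0); + * UI64_ADD_UI32(octets, pkt_len); + * if (0 < UI64_CMP(octets, threshold)) { (take action) }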
+ * NOTES: + */ + +#ifndef OSAL_TYPES_H +#define OSAL_TYPES_H + +/* INCLUDE FILE DECLARATIONS + */ + +/* NAMING CONSTANT DECLARATIONS + */ + + +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef TRUE +#define TRUE 1 +#endif + +#ifndef NULL +#define NULL (void *)0 +#endif + + +#if defined(NPS_EN_HOST_64_BIT_BIG_ENDIAN) +#define UI64_MSW 0 +#define UI64_LSW 1 +#elif defined(NPS_EN_HOST_64_BIT_LITTLE_ENDIAN) +#define UI64_MSW 1 +#define UI64_LSW 0 +#else +#define UI64_MSW 1 +#define UI64_LSW 0 +#endif + +#if defined(NPS_EN_COMPILER_SUPPORT_LONG_LONG) +#define UI64_HI(dst) ((UI32_T)((dst) >> 32)) +#define UI64_LOW(dst) ((UI32_T)((dst) & 0xffffffff)) +#define UI64_ASSIGN(dst, high, low) ((dst) = ((long long)(high) << 32 | (long long)(low))) +#define UI64_SET(dst, src) ((dst) = (src)) +#define UI64_ADD_UI32(dst, src) ((dst) += ((long long)(src))) +#define UI64_SUB_UI32(dst, src) ((dst) -= ((long long)(src))) +#define UI64_ADD_UI64(dst, src) ((dst) += (src)) +#define UI64_SUB_UI64(dst, src) ((dst) -= (src)) +#define UI64_AND(dst, src) ((dst) &= (src)) +#define UI64_OR(dst, src) ((dst) |= (src)) +#define UI64_XOR(dst, src) ((dst) ^= (src)) +#define UI64_NOT(dst) ((dst) = ~(dst)) +#define UI64_MULT_UI32(dst, src) ((dst) *= (src)) +#define UI64_MULT_UI64(dst, src) ((dst) *= (src)) +/* UI64_T type data comparison: + * if data1 > data2 return 1 + * if data1 < data2 return -1 + * if data1 == data2 return 0 + */ +#define UI64_CMP(data1, data2) (((data1) > (data2)) ? 1 : (((data1) < (data2)) ? -1 : 0)) + +#else +#define UI64_HI(dst) ((dst).ui64[UI64_MSW]) +#define UI64_LOW(dst) ((dst).ui64[UI64_LSW]) +#define UI64_ASSIGN(dst, high, low) \ + do \ + { \ + UI64_HI(dst) = (high); \ + UI64_LOW(dst) = (low); \ + } while(0) + +#define UI64_SET(dst, src) \ + do \ + { \ + UI64_HI(dst) = UI64_HI(src); \ + UI64_LOW(dst) = UI64_LOW(src); \ + } while(0) + + +#define UI64_ADD_UI32(dst, src) \ + do \ + { \ + UI32_T _i_ = UI64_LOW(dst); \ + UI64_LOW(dst) += (src); \ + if (UI64_LOW(dst) < _i_) \ + { \ + UI64_HI(dst)++; \ + } \ + } while(0) + +#define UI64_SUB_UI32(dst, src) \ + do \ + { \ + UI32_T _i_ = UI64_LOW(dst); \ + UI64_LOW(dst) -= src; \ + if (UI64_LOW(dst) > _i_) \ + { \ + UI64_HI(dst)--; \ + } \ + } while(0) + + +#define UI64_ADD_UI64(dst, src) \ + do \ + { \ + UI32_T _i_ = UI64_LOW(dst); \ + UI64_LOW(dst) += UI64_LOW(src); \ + if (UI64_LOW(dst) < _i_) \ + { \ + UI64_HI(dst)++; \ + } \ + UI64_HI(dst) += UI64_HI(src); \ + } while(0) + +#define UI64_SUB_UI64(dst, src) \ + do { \ + UI32_T _i_ = UI64_LOW(dst); \ + UI64_LOW(dst) -= UI64_LOW(src); \ + if (UI64_LOW(dst) > _i_) \ + { \ + UI64_HI(dst)--; \ + } \ + UI64_HI(dst) -= UI64_HI(src); \ + } while(0) + + +#define UI64_AND(dst, src) \ + do { \ + UI64_HI(dst) &= UI64_HI(src); \ + UI64_LOW(dst) &= UI64_LOW(src); \ + } while(0) + +#define UI64_OR(dst, src) \ + do { \ + UI64_HI(dst) |= UI64_HI(src); \ + UI64_LOW(dst) |= UI64_LOW(src); \ + } while(0) + +#define UI64_XOR(dst, src) \ + do { \ + UI64_HI(dst) ^= UI64_HI(src); \ + UI64_LOW(dst) ^= UI64_LOW(src); \ + } while(0) + +#define UI64_NOT(dst) \ + do { \ + UI64_HI(dst) = ~UI64_HI(dst); \ + UI64_LOW(dst) = ~UI64_LOW(dst); \ + } while(0) + +/* UI64_T type data comparison: + * if data1 > data2 return 1 + * if data1 < data2 return -1 + * if data1 == data2 return 0 + */ +#define UI64_CMP(data1, data2) \ + (((data1).ui64[UI64_MSW] > (data2).ui64[UI64_MSW]) \ + ? 1 : (((data1).ui64[UI64_MSW] == (data2).ui64[UI64_MSW]) \ + ? (((data1).ui64[UI64_LSW] == (data2).ui64[UI64_LSW]) \ + ?
0 :(((data1).ui64[UI64_LSW] > (data2).ui64[UI64_LSW]) \ + ? 1 : -1)) : -1)) + +#define UI64_MULT_UI64(dst, src) \ + do \ + { \ + UI32_T _ret_low_ = 0; \ + UI32_T _ret_high_ = 0; \ + UI32_T _i_ = 0; \ + UI32_T _j_ = 0; \ + UI32_T _temp_ = 0; \ + UI32_T dst_t[4] = {0, 0, 0, 0}; \ + UI32_T src_t[4] = {0, 0, 0, 0}; \ + dst_t[0] = UI64_LOW(dst) & 0xFFFF; \ + dst_t[1] = UI64_LOW(dst) >> 16; \ + dst_t[2] = UI64_HI(dst) & 0xFFFF; \ + dst_t[3] = UI64_HI(dst) >> 16; \ + src_t[0] = UI64_LOW(src) & 0xFFFF; \ + src_t[1] = UI64_LOW(src) >> 16; \ + src_t[2] = UI64_HI(src) & 0xFFFF; \ + src_t[3] = UI64_HI(src) >> 16; \ + for(_i_ = 0; _i_ < 4; _i_++) \ + { \ + for(_j_ = 0; _j_ < 4; _j_++) \ + { \ + if((dst_t[_i_] != 0) && (src_t[_j_] != 0)) \ + { \ + _temp_ = dst_t[_i_] * src_t[_j_]; \ + if(0 == (_i_ + _j_)) \ + { \ + _ret_low_ += _temp_; \ + if (_ret_low_ < _temp_) \ + { \ + _ret_high_++; \ + } \ + } \ + if(1 == (_i_ + _j_)) \ + { \ + _ret_low_ += (_temp_ << 16); \ + if (_ret_low_ < (_temp_ << 16)) \ + { \ + _ret_high_++; \ + } \ + _ret_high_ += (_temp_ >> 16); \ + } \ + if(2 == (_i_+_j_)) \ + { \ + _ret_high_ += _temp_; \ + } \ + if(3 == (_i_ + _j_)) \ + { \ + _ret_high_ += (_temp_ << 16); \ + } \ + } \ + } \ + } \ + UI64_HI(dst) = _ret_high_; \ + UI64_LOW(dst) = _ret_low_; \ + } while(0) + +#define UI64_MULT_UI32(dst, src) \ + do \ + { \ + UI32_T _ret_low_ = 0; \ + UI32_T _ret_high_ = 0; \ + UI32_T _i_ = 0; \ + UI32_T _j_ = 0; \ + UI32_T _temp_ = 0; \ + UI32_T dst_t[4] = {0, 0, 0, 0}; \ + UI32_T src_t[2] = {0, 0}; \ + dst_t[0] = UI64_LOW(dst) & 0xFFFF; \ + dst_t[1] = UI64_LOW(dst) >> 16; \ + dst_t[2] = UI64_HI(dst) & 0xFFFF; \ + dst_t[3] = UI64_HI(dst) >> 16; \ + src_t[0] = src & 0xFFFF; \ + src_t[1] = src >> 16; \ + for(_i_ = 0; _i_ < 4; _i_++) \ + { \ + for(_j_ = 0; _j_ < 2; _j_++) \ + { \ + if((dst_t[_i_] != 0) && (src_t[_j_] != 0)) \ + { \ + _temp_ = dst_t[_i_] * src_t[_j_]; \ + if(0 == (_i_ + _j_)) \ + { \ + _ret_low_ += _temp_; \ + if (_ret_low_ < _temp_) \ + { \ + _ret_high_++; \ + } \ + } \ + if(1 == (_i_ + _j_)) \ + { \ + _ret_low_ += (_temp_ << 16); \ + if (_ret_low_ < (_temp_ << 16)) \ + { \ + _ret_high_++; \ + } \ + _ret_high_ += (_temp_ >> 16); \ + } \ + if(2 == (_i_ + _j_)) \ + { \ + _ret_high_ += _temp_; \ + } \ + if(3 == (_i_ + _j_)) \ + { \ + _ret_high_ += (_temp_ << 16); \ + } \ + } \ + } \ + } \ + UI64_HI(dst) = _ret_high_; \ + UI64_LOW(dst) = _ret_low_; \ + } while(0) + +#endif + +/* DATA TYPE DECLARATIONS + */ +typedef int BOOL_T; +typedef signed char I8_T; +typedef unsigned char UI8_T; +typedef signed short I16_T; +typedef unsigned short UI16_T; +typedef signed int I32_T; +typedef unsigned int UI32_T; +typedef char C8_T; + +#if defined(NPS_EN_COMPILER_SUPPORT_LONG_LONG) +typedef signed long long int I64_T; +typedef unsigned long long int UI64_T; +#else +typedef struct +{ + I32_T i64[2]; +} I64_T; + +typedef struct +{ + UI32_T ui64[2]; +} UI64_T; +#endif + +#endif /* OSAL_TYPES_H */ diff --git a/platform/nephos/nephos-modules/modules/src/make.mk b/platform/nephos/nephos-modules/modules/src/make.mk new file mode 100755 index 000000000000..e556ea10d765 --- /dev/null +++ b/platform/nephos/nephos-modules/modules/src/make.mk @@ -0,0 +1,37 @@ +################################################################################ +# Copyright (C) 2019 Nephos, Inc. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of version 2 of the GNU General Public +# License as published by the Free Software Foundation. 
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# version 2 along with this program.
+################################################################################
+DEV_MODULE_NAME := nps_dev
+NETIF_MODULE_NAME := nps_netif
+################################################################################
+DEV_OBJS_TOTAL := ./src/osal_mdc.o ./src/osal_isymbol.o
+NETIF_OBJS_TOTAL := ./src/hal_tau_pkt_knl.o ./src/netif_perf.o ./src/netif_osal.o
+
+obj-m := $(DEV_MODULE_NAME).o $(NETIF_MODULE_NAME).o
+$(DEV_MODULE_NAME)-objs := $(DEV_OBJS_TOTAL)
+$(NETIF_MODULE_NAME)-objs := $(NETIF_OBJS_TOTAL)
+
+KBUILD_EXTRA_SYMBOLS := $(BUILD_OUTPUT_DIR)/Module.symvers
+################################################################################
+folder:
+ $(TEST_PATH) $(BUILD_OUTPUT_DIR) || $(MKDIR) $(BUILD_OUTPUT_DIR)
+ $(TEST_PATH) $(BUILD_OUTPUT_DIR)/src || $(MKDIR) $(BUILD_OUTPUT_DIR)/src
+
+compile:: folder
+ touch $(BUILD_OUTPUT_DIR)/Makefile
+ $(MAKE) -C $(OS_PATH) M=$(BUILD_OUTPUT_DIR) src=$(shell pwd) modules EXTRA_CFLAGS="$(EXTRA_CFLAGS)" KBUILD_EXTRA_SYMBOLS=$(KBUILD_EXTRA_SYMBOLS)
+install::
+
+clean::
diff --git a/platform/nephos/nephos-modules/modules/src/netif_osal.c b/platform/nephos/nephos-modules/modules/src/netif_osal.c
new file mode 100755
index 000000000000..f3029619261c
--- /dev/null
+++ b/platform/nephos/nephos-modules/modules/src/netif_osal.c
@@ -0,0 +1,749 @@
+/* Copyright (C) 2019 Nephos, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program.
+ */
+
+/* FILE NAME: netif_osal.c
+ * PURPOSE:
+ * It provides customized Linux APIs.
+ * NOTES:
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#include
+#endif
+#include
+
+/* ----------------------------------------------------------------------------------- macro value */
+#define OSAL_US_PER_SECOND (1000000) /* microseconds per second */
+#define OSAL_NS_PER_USECOND (1000) /* nanoseconds per microsecond */
+
+/* ----------------------------------------------------------------------------------- struct */
+extern struct pci_dev *_ptr_ext_pci_dev;
+
+static linux_thread_t _osal_thread_head = {{0}};
+
+/* ----------------------------------------------------------------------------------- function */
+/* general */
+void *
+osal_memset(
+ void *ptr_mem,
+ const I32_T value,
+ const UI32_T num)
+{
+ return memset(ptr_mem, value, num);
+}
+
+void *
+osal_memcpy(
+ void *ptr_dst,
+ const void *ptr_src,
+ const UI32_T num)
+{
+ return memcpy(ptr_dst, ptr_src, num);
+}
+
+UI32_T
+osal_strlen(
+ const C8_T *ptr_str)
+{
+ return strlen(ptr_str);
+}
+
+void
+osal_printf(
+ const C8_T *ptr_fmt,
+ ...)
+{ + va_list ap; + char buf[OSAL_PRN_BUF_SZ]; + + if (NULL != ptr_fmt) + { + va_start(ap, ptr_fmt); + vsnprintf(buf, OSAL_PRN_BUF_SZ, ptr_fmt, ap); + va_end(ap); + + printk("%s", buf); + } +} + +void * +osal_alloc( + const UI32_T size) +{ + return kmalloc(size, GFP_ATOMIC); +} + +void +osal_free( + const void *ptr_mem) +{ + kfree(ptr_mem); +} + +/* thread */ +NPS_ERROR_NO_T +osal_init(void) +{ + linux_thread_t *ptr_thread_head = &_osal_thread_head; + + memset(ptr_thread_head, 0x0, sizeof(linux_thread_t)); + + /* init */ + ptr_thread_head->ptr_prev = ptr_thread_head; + ptr_thread_head->ptr_next = ptr_thread_head; + + return (NPS_E_OK); +} + +NPS_ERROR_NO_T +osal_deinit(void) +{ + return (NPS_E_OK); +} + +NPS_ERROR_NO_T +osal_createThread( + const C8_T *ptr_thread_name, + const UI32_T stack_size, + const UI32_T priority, + void (function)(void*), + void *ptr_arg, + NPS_THREAD_ID_T *ptr_thread_id) +{ + char dft_name[OSAL_THREAD_NAME_LEN + 1] = OSAL_THREAD_DFT_NAME; + linux_thread_t *ptr_thread_head = &_osal_thread_head; + linux_thread_t *ptr_thread_node = osal_alloc(sizeof(linux_thread_t)); + + /* process name */ + osal_memcpy(ptr_thread_node->name, (0 == osal_strlen(ptr_thread_name))? + dft_name : ptr_thread_name, OSAL_THREAD_NAME_LEN); + ptr_thread_node->name[OSAL_THREAD_NAME_LEN] = '\0'; + + /* init */ + ptr_thread_node->ptr_task = kthread_create((int(*)(void *))function, ptr_arg, ptr_thread_name); + ptr_thread_node->ptr_task->policy = SCHED_RR; + ptr_thread_node->ptr_task->rt_priority = priority; + ptr_thread_node->is_stop = FALSE; + + *ptr_thread_id = (NPS_THREAD_ID_T)ptr_thread_node; + + wake_up_process(ptr_thread_node->ptr_task); + + /* append the thread_node */ + ptr_thread_node->ptr_prev = ptr_thread_head->ptr_prev; + ptr_thread_head->ptr_prev->ptr_next = ptr_thread_node; + ptr_thread_node->ptr_next = ptr_thread_head; + ptr_thread_head->ptr_prev = ptr_thread_node; + + return (NPS_E_OK); +} + +NPS_ERROR_NO_T +osal_stopThread( + NPS_THREAD_ID_T *ptr_thread_id) +{ + linux_thread_t *ptr_thread_node = (linux_thread_t *)(*ptr_thread_id); + + ptr_thread_node->is_stop = TRUE; + + return (NPS_E_OK); +} + +NPS_ERROR_NO_T +osal_destroyThread( + NPS_THREAD_ID_T *ptr_thread_id) +{ + linux_thread_t *ptr_thread_node = (linux_thread_t *)(*ptr_thread_id); + + kthread_stop(ptr_thread_node->ptr_task); + + /* remove the thread_node */ + ptr_thread_node->ptr_next->ptr_prev = ptr_thread_node->ptr_prev; + ptr_thread_node->ptr_prev->ptr_next = ptr_thread_node->ptr_next; + + osal_free(ptr_thread_node); + *ptr_thread_id = 0; + + return (NPS_E_OK); +} + +void +osal_initRunThread(void) +{ + /* for reboot or shutdown without stopping kthread */ + allow_signal(SIGTERM); +} + +NPS_ERROR_NO_T +osal_isRunThread(void) +{ + linux_thread_t *ptr_thread_node = _osal_thread_head.ptr_next; + + while (1) + { + if (ptr_thread_node == &_osal_thread_head) + { + osal_printf("Cannot find task 0x%x.\n", current); + break; + } + if (ptr_thread_node->ptr_task == current) + { + break; + } + ptr_thread_node = ptr_thread_node->ptr_next; + } + + if ((TRUE == ptr_thread_node->is_stop) || (signal_pending(current))) + { + return (NPS_E_OTHERS); + } + + return (NPS_E_OK); +} + +void +osal_exitRunThread(void) +{ + while (!kthread_should_stop() && !signal_pending(current)) + { + osal_sleepThread(OSAL_NS_PER_USECOND); + } +} + +/* semaphore */ +NPS_ERROR_NO_T +osal_createSemaphore( + const C8_T *ptr_sema_name, + const UI32_T sema_count, + NPS_SEMAPHORE_ID_T *ptr_semaphore_id) +{ + char dft_name[OSAL_SEMA_NAME_LEN + 1] = OSAL_SEMA_DFT_NAME; + 
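/* NOTE: osal_alloc() wraps kmalloc(GFP_ATOMIC) and may return NULL;
+ * as in the other create functions here, the result is used unchecked. */
+ 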
linux_sema_t *ptr_sema = osal_alloc(sizeof(linux_sema_t)); + + /* process name */ + osal_memcpy(ptr_sema->name, (0 == osal_strlen(ptr_sema_name))? + dft_name : ptr_sema_name, OSAL_SEMA_NAME_LEN); + ptr_sema->name[OSAL_SEMA_NAME_LEN] = '\0'; + + /* init */ + sema_init(&ptr_sema->lock, NPS_SEMAPHORE_BINARY); + + *ptr_semaphore_id = (NPS_SEMAPHORE_ID_T)ptr_sema; + + return (NPS_E_OK); +} + +NPS_ERROR_NO_T +osal_takeSemaphore( + NPS_SEMAPHORE_ID_T *ptr_semaphore_id, + UI32_T time_out) +{ + linux_sema_t *ptr_sema = (linux_sema_t *)(*ptr_semaphore_id); + + if (in_interrupt()) + { + return (NPS_E_OTHERS); + } + + if (!down_interruptible(&ptr_sema->lock)) + { + return (NPS_E_OK); + } + + return (NPS_E_OTHERS); +} + +NPS_ERROR_NO_T +osal_giveSemaphore( + NPS_SEMAPHORE_ID_T *ptr_semaphore_id) +{ + linux_sema_t *ptr_sema = (linux_sema_t *)(*ptr_semaphore_id); + + up(&ptr_sema->lock); + + return (NPS_E_OK); +} + +NPS_ERROR_NO_T +osal_destroySemaphore( + NPS_SEMAPHORE_ID_T *ptr_semaphore_id) +{ + linux_sema_t *ptr_sema = (linux_sema_t *)(*ptr_semaphore_id); + + osal_free(ptr_sema); + *ptr_semaphore_id = 0; + + return (NPS_E_OK); +} + +/* event */ +NPS_ERROR_NO_T +osal_createEvent( + const C8_T *ptr_event_name, + NPS_SEMAPHORE_ID_T *ptr_event_id) +{ + char dft_name[OSAL_EVENT_NAME_LEN + 1] = OSAL_EVENT_DFT_NAME; + linux_event_t *ptr_event = osal_alloc(sizeof(linux_event_t)); + + /* process name */ + osal_memcpy(ptr_event->name, (0 == osal_strlen(ptr_event_name))? + dft_name : ptr_event_name, OSAL_EVENT_NAME_LEN); + ptr_event->name[OSAL_EVENT_NAME_LEN] = '\0'; + + /* init */ + ptr_event->condition = FALSE; + init_waitqueue_head(&ptr_event->wait_que); + + *ptr_event_id = (NPS_SEMAPHORE_ID_T)ptr_event; + + return (NPS_E_OK); +} + +NPS_ERROR_NO_T +osal_waitEvent( + NPS_SEMAPHORE_ID_T *ptr_event_id) +{ + linux_event_t *ptr_event = (linux_event_t *)(*ptr_event_id); + + if (!wait_event_interruptible(ptr_event->wait_que, ptr_event->condition)) + { + ptr_event->condition = FALSE; + + return (NPS_E_OK); + } + + return (NPS_E_OTHERS); +} + +NPS_ERROR_NO_T +osal_triggerEvent( + NPS_SEMAPHORE_ID_T *ptr_event_id) +{ + linux_event_t *ptr_event = (linux_event_t *)(*ptr_event_id); + + ptr_event->condition = TRUE; + wake_up_interruptible(&ptr_event->wait_que); + + return (NPS_E_OK); +} + +NPS_ERROR_NO_T +osal_destroyEvent( + NPS_SEMAPHORE_ID_T *ptr_event_id) +{ + linux_event_t *ptr_event = (linux_event_t *)(*ptr_event_id); + + osal_free(ptr_event); + *ptr_event_id = 0; + + return (NPS_E_OK); +} + +/* isr_lock */ +NPS_ERROR_NO_T +osal_createIsrLock( + const C8_T *ptr_isrlock_name, + NPS_ISRLOCK_ID_T *ptr_isrlock_id) +{ + char dft_name[OSAL_SPIN_NAME_LEN + 1] = OSAL_SPIN_DFT_NAME; + linux_isrlock_t *ptr_isrlock = osal_alloc(sizeof(linux_isrlock_t)); + + /* process name */ + osal_memcpy(ptr_isrlock->name, (0 == osal_strlen(ptr_isrlock_name))? 
+ dft_name : ptr_isrlock_name, OSAL_SPIN_NAME_LEN); + ptr_isrlock->name[OSAL_SPIN_NAME_LEN] = '\0'; + + /* init */ + spin_lock_init(&ptr_isrlock->spinlock); + + *ptr_isrlock_id = (NPS_ISRLOCK_ID_T)ptr_isrlock; + + return (NPS_E_OK); +} + +NPS_ERROR_NO_T +osal_takeIsrLock( + NPS_ISRLOCK_ID_T *ptr_isrlock_id, + NPS_IRQ_FLAGS_T *ptr_irq_flags) +{ + linux_isrlock_t *ptr_isrlock = (linux_isrlock_t *)(*ptr_isrlock_id); + unsigned long flags = 0; + + spin_lock_irqsave(&ptr_isrlock->spinlock, flags); + *ptr_irq_flags = (NPS_IRQ_FLAGS_T)flags; + + return (NPS_E_OK); +} + +NPS_ERROR_NO_T +osal_giveIsrLock( + NPS_ISRLOCK_ID_T *ptr_isrlock_id, + NPS_IRQ_FLAGS_T *ptr_irq_flags) +{ + linux_isrlock_t *ptr_isrlock = (linux_isrlock_t *)(*ptr_isrlock_id); + unsigned long flags = 0; + + flags = (unsigned long)(*ptr_irq_flags); + spin_unlock_irqrestore(&ptr_isrlock->spinlock, flags); + + return (NPS_E_OK); +} + +NPS_ERROR_NO_T +osal_destroyIsrLock( + NPS_ISRLOCK_ID_T *ptr_isrlock_id) +{ + linux_isrlock_t *ptr_isrlock = (linux_isrlock_t *)(*ptr_isrlock_id); + + osal_free(ptr_isrlock); + *ptr_isrlock_id = 0; + + return (NPS_E_OK); +} + +/* time */ +NPS_ERROR_NO_T +osal_sleepThread( + const UI32_T usecond) +{ + UI32_T tick_usec; /* how many usec per second */ + UI32_T jiffies; + + if (0 != usecond) + { + /* HZ : times/sec, tick = 1/HZ */ + tick_usec = OSAL_TICKS_PER_SEC / HZ; + if (in_interrupt() || (usecond < tick_usec)) + { + return (-1); + } + else + { + DECLARE_WAIT_QUEUE_HEAD(suspend_queue); + + if (usecond > 0xFFFFFFFF - (tick_usec - 1)) + { + jiffies = 0xFFFFFFFF / tick_usec; + } + else + { + jiffies = (usecond + (tick_usec - 1)) / tick_usec; + } + + return wait_event_interruptible_timeout(suspend_queue, 0, jiffies); + } + } + + return (NPS_E_OK); +} + +NPS_ERROR_NO_T +osal_getTime( + NPS_TIME_T *ptr_time) +{ + struct timeval usec_time; + + do_gettimeofday(&usec_time); + *(NPS_TIME_T *)ptr_time = (usec_time.tv_sec * OSAL_US_PER_SECOND) + usec_time.tv_usec; + + return (NPS_E_OK); +} + +/* queue */ +NPS_ERROR_NO_T +osal_que_create( + NPS_HUGE_T *ptr_queue_id, + UI32_T capacity) +{ + linux_queue_t *ptr_queue = osal_alloc(sizeof(linux_queue_t)); + + ptr_queue->head = 0; + ptr_queue->tail = 0; + ptr_queue->wr_cnt = 0; + ptr_queue->rd_cnt = 0; + ptr_queue->capacity = capacity; + ptr_queue->ptr_entry = osal_alloc(sizeof(linux_queue_entry_t) * capacity); + memset(ptr_queue->ptr_entry, 0x0, sizeof(linux_queue_entry_t) * capacity); + + *ptr_queue_id = (NPS_HUGE_T)ptr_queue; + + return (NPS_E_OK); +} + +NPS_ERROR_NO_T +osal_que_enque( + NPS_HUGE_T *ptr_queue_id, + void *ptr_data) +{ + linux_queue_t *ptr_queue = (linux_queue_t *)(*ptr_queue_id); + + if (ptr_queue->wr_cnt - ptr_queue->rd_cnt >= ptr_queue->capacity) + { + return (NPS_E_OTHERS); + } + + /* save data to the tail */ + ptr_queue->ptr_entry[ptr_queue->tail].ptr_data = ptr_data; + + /* calculate tail and wr_cnt */ + ptr_queue->tail++; + if (ptr_queue->tail >= ptr_queue->capacity) + { + ptr_queue->tail = 0; + } + + ptr_queue->wr_cnt++; + + return (NPS_E_OK); +} + +NPS_ERROR_NO_T +osal_que_deque( + NPS_HUGE_T *ptr_queue_id, + void **pptr_data) +{ + linux_queue_t *ptr_queue = (linux_queue_t *)(*ptr_queue_id); + + if (ptr_queue->wr_cnt == ptr_queue->rd_cnt) + { + return (NPS_E_OTHERS); + } + + /* get data from head */ + *pptr_data = ptr_queue->ptr_entry[ptr_queue->head].ptr_data; + ptr_queue->ptr_entry[ptr_queue->head].ptr_data = NULL; + + /* calculate head and rd_cnt */ + ptr_queue->head++; + if (ptr_queue->head >= ptr_queue->capacity) + { + ptr_queue->head = 
0;
+ }
+
+ ptr_queue->rd_cnt++;
+
+ return (NPS_E_OK);
+}
+
+NPS_ERROR_NO_T
+osal_que_destroy(
+ NPS_HUGE_T *ptr_queue_id)
+{
+ linux_queue_t *ptr_queue = (linux_queue_t *)(*ptr_queue_id);
+
+ osal_free(ptr_queue->ptr_entry);
+ osal_free(ptr_queue);
+ *ptr_queue_id = 0;
+
+ return (NPS_E_OK);
+}
+
+NPS_ERROR_NO_T
+osal_que_getCount(
+ NPS_HUGE_T *ptr_queue_id,
+ unsigned int *ptr_count)
+{
+ linux_queue_t *ptr_queue = (linux_queue_t *)(*ptr_queue_id);
+
+ *ptr_count = ptr_queue->wr_cnt - ptr_queue->rd_cnt;
+
+ return (NPS_E_OK);
+}
+
+/* IO */
+int
+osal_io_copyToUser(
+ void *ptr_usr_buf,
+ void *ptr_knl_buf,
+ unsigned int size)
+{
+ return copy_to_user(ptr_usr_buf, ptr_knl_buf, size);
+}
+
+int
+osal_io_copyFromUser(
+ void *ptr_knl_buf,
+ void *ptr_usr_buf,
+ unsigned int size)
+{
+ return copy_from_user(ptr_knl_buf, ptr_usr_buf, size);
+}
+
+/* dma */
+void *
+osal_dma_alloc(
+ const UI32_T size)
+{
+ struct device *ptr_dev = &_ptr_ext_pci_dev->dev;
+ linux_dma_t *ptr_dma_node = NULL;
+ dma_addr_t phy_addr = 0x0;
+
+ ptr_dma_node = dma_alloc_coherent(ptr_dev, sizeof(linux_dma_t) + size, &phy_addr, GFP_ATOMIC);
+ ptr_dma_node->size = sizeof(linux_dma_t) + size;
+ ptr_dma_node->phy_addr = phy_addr;
+
+ return (void *)ptr_dma_node->data;
+}
+
+NPS_ERROR_NO_T
+osal_dma_free(
+ void *ptr_dma_mem)
+{
+ struct device *ptr_dev = &_ptr_ext_pci_dev->dev;
+ linux_dma_t *ptr_dma_node = (linux_dma_t *)(ptr_dma_mem - sizeof(linux_dma_t));
+
+ dma_free_coherent(ptr_dev, ptr_dma_node->size, ptr_dma_node, ptr_dma_node->phy_addr);
+
+ return (NPS_E_OK);
+}
+
+dma_addr_t
+osal_dma_convertVirtToPhy(
+ void *ptr_virt_addr)
+{
+ return virt_to_phys(ptr_virt_addr);
+}
+
+void *
+osal_dma_convertPhyToVirt(
+ const dma_addr_t phy_addr)
+{
+ return phys_to_virt(phy_addr);
+}
+
+int
+osal_dma_flushCache(
+ void *ptr_virt_addr,
+ const unsigned int size)
+{
+#if defined(CONFIG_NOT_COHERENT_CACHE) || defined(CONFIG_DMA_NONCOHERENT)
+#if defined(dma_cache_wback_inv)
+ dma_cache_wback_inv((NPS_HUGE_T)ptr_virt_addr, size);
+#else
+ dma_cache_sync(NULL, ptr_virt_addr, size, DMA_TO_DEVICE);
+#endif
+#endif
+ return (0);
+}
+
+int
+osal_dma_invalidateCache(
+ void *ptr_virt_addr,
+ const unsigned int size)
+{
+#if defined(CONFIG_NOT_COHERENT_CACHE) || defined(CONFIG_DMA_NONCOHERENT)
+#if defined(dma_cache_wback_inv)
+ dma_cache_wback_inv((NPS_HUGE_T)ptr_virt_addr, size);
+#else
+ dma_cache_sync(NULL, ptr_virt_addr, size, DMA_FROM_DEVICE);
+#endif
+#endif
+ return (0);
+}
+
+/* skb */
+struct sk_buff *
+osal_skb_alloc(
+ UI32_T size)
+{
+ struct sk_buff *ptr_skb = NULL;
+
+ /*
+ * 1. alloc_skb (len, flag) : GFP_KERNEL
+ * 2. netdev_alloc_skb (dev, len) : GFP_ATOMIC
+ * 3. dev_alloc_skb (len) : GFP_ATOMIC
+ * 4. netdev_alloc_skb_ip_align (dev, len) : GFP_ATOMIC
+ *
+ * note: the Ethernet header is 14 bytes; we reserve 2 bytes to align the IP header
+ */
+ ptr_skb = dev_alloc_skb(size + NET_IP_ALIGN);
+ skb_reserve(ptr_skb, NET_IP_ALIGN);
+ skb_put(ptr_skb, size);
+
+ return (ptr_skb);
+}
+
+void
+osal_skb_free(
+ struct sk_buff *ptr_skb)
+{
+ /*
+ * 1. dev_kfree_skb (*skb) : release in process context
+ * 2. dev_kfree_skb_irq (*skb) : release in interrupt context
+ * 3. 
dev_kfree_skb_any (*skb) : release in any context
+ */
+ dev_kfree_skb_any(ptr_skb);
+}
+
+dma_addr_t
+osal_skb_mapDma(
+ struct sk_buff *ptr_skb,
+ enum dma_data_direction dir)
+{
+ struct device *ptr_dev = &_ptr_ext_pci_dev->dev;
+ dma_addr_t phy_addr = 0x0;
+
+ phy_addr = dma_map_single(ptr_dev, ptr_skb->data, ptr_skb->len, dir);
+ if (dma_mapping_error(ptr_dev, phy_addr))
+ {
+ phy_addr = 0x0;
+ }
+
+ return (phy_addr);
+}
+
+void
+osal_skb_unmapDma(
+ const dma_addr_t phy_addr,
+ UI32_T size,
+ enum dma_data_direction dir)
+{
+ struct device *ptr_dev = &_ptr_ext_pci_dev->dev;
+
+ dma_unmap_single(ptr_dev, phy_addr, size, dir);
+}
+
+void
+osal_skb_send(
+ struct sk_buff *ptr_skb)
+{
+ dev_queue_xmit(ptr_skb);
+}
+
+void
+osal_skb_recv(
+ struct sk_buff *ptr_skb)
+{
+ /* 1. netif_rx() : handle skb in interrupt context
+ * 2. netif_rx_ni() : handle skb in process context
+ * 3. netif_receive_skb() : for NAPI
+ */
+ netif_rx(ptr_skb);
+}
+
diff --git a/platform/nephos/nephos-modules/modules/src/netif_perf.c b/platform/nephos/nephos-modules/modules/src/netif_perf.c
new file mode 100755
index 000000000000..18606d6d25d4
--- /dev/null
+++ b/platform/nephos/nephos-modules/modules/src/netif_perf.c
@@ -0,0 +1,656 @@
+/* Copyright (C) 2019 Nephos, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program.
+ */
+
+/* FILE NAME: netif_perf.c
+ * PURPOSE:
+ * It provides customized performance-test APIs.
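+ * (Descriptive note: the Tx test sends packets either through the kernel
+ * netdev path (SKB) or directly as GPDs; the Rx test counts received GPDs
+ * of the expected length. See perf_test() below.)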
+ * NOTES: + */ +#include +#include + +#include +#include + +#if defined (NPS_EN_TAURUS) +#include +#endif + +/* -------------------------------------------------------------- switch */ +#if defined (NPS_EN_ARIES) +#define PERF_TX_CHANNEL_NUM_MAX (HAL_ARI_PKT_TX_CHANNEL_LAST) +#define PERF_RX_CHANNEL_NUM_MAX (HAL_ARI_PKT_RX_CHANNEL_LAST) +typedef HAL_ARI_PKT_TX_SW_GPD_T PERF_TX_SW_GPD; +typedef HAL_ARI_PKT_RX_SW_GPD_T PERF_RX_SW_GPD; +#endif + +#if defined (NPS_EN_TAURUS) +#define PERF_TX_CHANNEL_NUM_MAX (HAL_TAU_PKT_TX_CHANNEL_LAST) +#define PERF_RX_CHANNEL_NUM_MAX (HAL_TAU_PKT_RX_CHANNEL_LAST) +typedef HAL_TAU_PKT_TX_SW_GPD_T PERF_TX_SW_GPD; +typedef HAL_TAU_PKT_RX_SW_GPD_T PERF_RX_SW_GPD; +#endif + +/* -------------------------------------------------------------- common */ +#define PERF_TX_PERF_NUM (1000000) /* max: 4294967 */ +#define PERF_TX_PERF_MESG (50000) +#define PERF_TX_PERF_FAIL (10000) +#define PERF_RX_PERF_NUM (1000000) /* max: 4294967 */ +#define PERF_RX_PERF_MESG (50000) +#define PERF_RX_PERF_FAIL (10000) + +/* -------------------------------------------------------------- callbacks for chip dependency */ +/* Tx */ +typedef NPS_ERROR_NO_T +(*PERF_TX_GET_INTR_T)( + const UI32_T unit, + const UI32_T channel, + UI32_T *ptr_intr_cnt); + +typedef NPS_ERROR_NO_T +(*PERF_TX_GET_NETDEV_T)( + const UI32_T unit, + const UI32_T port, + struct net_device **pptr_net_dev); + +typedef NPS_ERROR_NO_T +(*PERF_TX_PREPARE_GPD_T)( + const UI32_T unit, + const NPS_ADDR_T phy_addr, + const UI32_T len, + const UI32_T port, + PERF_TX_SW_GPD *ptr_sw_gpd); + +typedef NPS_ERROR_NO_T +(*PERF_TX_SEND_GPD_T)( + const UI32_T unit, + const UI32_T channel, + PERF_TX_SW_GPD *ptr_sw_gpd); + +/* Rx */ +typedef NPS_ERROR_NO_T +(*PERF_RX_GET_INTR_T)( + const UI32_T unit, + const UI32_T channel, + UI32_T *ptr_intr_cnt); + +/* -------------------------------------------------------------- structs */ +typedef enum +{ + PERF_DIR_TX = 0, + PERF_DIR_RX, + PERF_DIR_LAST, + +} PERF_DIR_T; + +typedef struct +{ + UI32_T unit; + UI32_T channel; + UI32_T len; + UI32_T num; + UI32_T port; + BOOL_T test_skb; + +} PERF_COOKIE_T; + +typedef struct +{ + /* netif-only */ + PERF_COOKIE_T tx_cookie [PERF_TX_CHANNEL_NUM_MAX]; + + NPS_THREAD_ID_T tx_task [PERF_TX_CHANNEL_NUM_MAX]; + NPS_SEMAPHORE_ID_T start_sync [PERF_TX_CHANNEL_NUM_MAX]; + NPS_SEMAPHORE_ID_T end_sync [PERF_TX_CHANNEL_NUM_MAX]; + UI32_T send_ok [PERF_TX_CHANNEL_NUM_MAX]; + UI32_T send_fail [PERF_TX_CHANNEL_NUM_MAX]; + + /* chip dependent callbacks */ + PERF_TX_GET_INTR_T get_intr_cnt; + PERF_TX_GET_NETDEV_T get_netdev; + PERF_TX_PREPARE_GPD_T prepare_gpd; + PERF_TX_SEND_GPD_T send_gpd; + +} PERF_TX_PERF_CB_T; + +typedef struct +{ + /* netif-only */ + BOOL_T rx_test; + + NPS_SEMAPHORE_ID_T start_sync; + NPS_SEMAPHORE_ID_T end_sync; + UI32_T target_num; + UI32_T target_len; + UI32_T recv_pass; + UI32_T recv_fail; + + /* duplicate packets */ + UI32_T rch_qid_map_lo [PERF_RX_CHANNEL_NUM_MAX]; + UI32_T rch_qid_map_hi [PERF_RX_CHANNEL_NUM_MAX]; + + /* chip dependent callbacks */ + PERF_RX_GET_INTR_T get_intr_cnt; + +} PERF_RX_PERF_CB_T; + +/* -------------------------------------------------------------- statics */ +static PERF_TX_PERF_CB_T _perf_tx_perf_cb = +{ +#if defined (NPS_EN_ARIES) + .get_intr_cnt = hal_ari_pkt_getTxIntrCnt, + .get_netdev = hal_ari_pkt_getNetDev, /* test_skb = TRUE */ + .prepare_gpd = hal_ari_pkt_prepareGpd, /* test_skb = FALSE */ + .send_gpd = hal_ari_pkt_sendGpd, /* test_skb = FALSE */ +#endif +#if defined (NPS_EN_TAURUS) + .get_intr_cnt = 
hal_tau_pkt_getTxIntrCnt, + .get_netdev = hal_tau_pkt_getNetDev, /* test_skb = TRUE */ + .prepare_gpd = hal_tau_pkt_prepareGpd, /* test_skb = FALSE */ + .send_gpd = hal_tau_pkt_sendGpd, /* test_skb = FALSE */ +#endif +}; + +static PERF_RX_PERF_CB_T _perf_rx_perf_cb = +{ +#if defined (NPS_EN_ARIES) + .get_intr_cnt = hal_ari_pkt_getRxIntrCnt, +#endif +#if defined (NPS_EN_TAURUS) + .get_intr_cnt = hal_tau_pkt_getRxIntrCnt, +#endif +}; + +/* -------------------------------------------------------------- functions */ +static void +_perf_duplicateRxPacket( + const UI32_T unit, + const UI32_T rx_channel, + const BOOL_T enable) +{ + ; +} + +static void +_perf_showPerf( + PERF_DIR_T dir, + UI32_T channel, + UI32_T len, + UI32_T num, + UI32_T intr, + UI32_T duration) +{ + UI32_T tx_channel = 0; + UI32_T tx_fail = 0; + + if (duration < 1000) + { + osal_printf("***Error***, %d packets cost < 1000 us.\n", num); + return ; + } + + osal_printf("\n"); + + if (PERF_DIR_TX == dir) + { + osal_printf("Tx-perf\n"); + } + else + { + osal_printf("Rx-perf\n"); + } + + osal_printf("------------------------------------\n"); + osal_printf("channel number : %d\n", channel); + osal_printf("packet length (bytes): %d\n", len); + osal_printf("packet number : %d\n", num); + osal_printf("time duration (us) : %d\n", duration); + osal_printf("------------------------------------\n"); + osal_printf("avg. packet rate (pps) : %d\n", (num * 1000) / (duration / 1000)); + osal_printf("avg. throughput (Mbps) : %d\n", ((num / 1000) * len * 8) / (duration / 1000)); + osal_printf("interrupt number : %d\n", intr); + + if (PERF_DIR_TX == dir) + { + for (tx_channel = 0; tx_channel < channel; tx_channel++) + { + tx_fail += _perf_tx_perf_cb.send_fail[tx_channel]; + } + osal_printf("Tx fail : %d\n", tx_fail); + } + + osal_printf("------------------------------------\n"); +} + +static void +_perf_getIntrCnt( + UI32_T unit, + PERF_DIR_T dir, + UI32_T *ptr_intr_cnt) +{ + UI32_T intr_cnt = 0; + UI32_T channel = 0; + + if (PERF_DIR_TX == dir) + { + for (channel = 0; channel < PERF_TX_CHANNEL_NUM_MAX; channel++) + { + _perf_tx_perf_cb.get_intr_cnt(unit, channel, &intr_cnt); + *ptr_intr_cnt += intr_cnt; + } + } + else + { + for (channel = 0; channel < PERF_RX_CHANNEL_NUM_MAX; channel++) + { + _perf_rx_perf_cb.get_intr_cnt(unit, channel, &intr_cnt); + *ptr_intr_cnt += intr_cnt; + } + } +} + +static void +_perf_txCallback( + const UI32_T unit, + PERF_TX_SW_GPD *ptr_sw_gpd, + void *ptr_virt_addr) +{ + /* free dma */ + osal_dma_free(ptr_virt_addr); + + /* free gpd */ + osal_free(ptr_sw_gpd); +} + +static void +_perf_txTask( + void *ptr_argv) +{ + NPS_ERROR_NO_T rc = NPS_E_OK; + UI32_T unit = ((PERF_COOKIE_T *)ptr_argv)->unit; + UI32_T channel = ((PERF_COOKIE_T *)ptr_argv)->channel; + UI32_T len = ((PERF_COOKIE_T *)ptr_argv)->len; + UI32_T num = ((PERF_COOKIE_T *)ptr_argv)->num; + UI32_T port = ((PERF_COOKIE_T *)ptr_argv)->port; + BOOL_T test_skb = ((PERF_COOKIE_T *)ptr_argv)->test_skb; + + /* test targets */ + PERF_TX_SW_GPD *ptr_sw_gpd = NULL; + struct sk_buff *ptr_skb = NULL; + + /* temp variables */ + UI32_T send_fail = 0; + void *ptr_virt_addr = NULL; + NPS_ADDR_T phy_addr = 0x0; + + osal_initRunThread(); + do + { + rc = osal_waitEvent(&_perf_tx_perf_cb.start_sync[channel]); + if (NPS_E_OK != osal_isRunThread()) + { + break; /* deinit-thread */ + } + + while (_perf_tx_perf_cb.send_ok[channel] < num) + { + if (0 == (_perf_tx_perf_cb.send_ok[channel] % PERF_TX_PERF_MESG)) + { + printk("T"); + } + + if (TRUE == test_skb) + { + ptr_skb = 
osal_skb_alloc(len); + ptr_skb->len = len; + _perf_tx_perf_cb.get_netdev(unit, port, &ptr_skb->dev); + + /* send skb */ + osal_skb_send(ptr_skb); + } + else + { + ptr_sw_gpd = osal_alloc(sizeof(PERF_TX_SW_GPD)); + if (NULL == ptr_sw_gpd) + { + osal_printf("***Error***, alloc sw-gpd fail.\n"); + break; + } + + /* prepare buf */ + ptr_virt_addr = osal_dma_alloc(len); + phy_addr = osal_dma_convertVirtToPhy(ptr_virt_addr); + + /* trans skb to gpd */ + osal_memset(ptr_sw_gpd, 0x0, sizeof(PERF_TX_SW_GPD)); + ptr_sw_gpd->callback = (void *)_perf_txCallback; + ptr_sw_gpd->ptr_cookie = (void *)ptr_virt_addr; + ptr_sw_gpd->gpd_num = 1; + ptr_sw_gpd->ptr_next = NULL; + ptr_sw_gpd->channel = channel; + + /* prepare gpd */ + rc = _perf_tx_perf_cb.prepare_gpd(unit, phy_addr, len, port, ptr_sw_gpd); + + /* send gpd */ + rc = _perf_tx_perf_cb.send_gpd(unit, channel, ptr_sw_gpd); + if (NPS_E_OK == rc) + { + _perf_tx_perf_cb.send_ok[channel]++; + send_fail = 0; + } + else + { + _perf_tx_perf_cb.send_fail[channel]++; + if (send_fail++ >= PERF_TX_PERF_FAIL) + { + osal_printf("***Error***, Tch-%d send fail over %d packet(s). (rc: %d)\n", + channel, PERF_TX_PERF_FAIL, rc); + break; + } + + _perf_txCallback(unit, ptr_sw_gpd, ptr_virt_addr); + osal_sleepThread(1000); + } + } + } + + osal_triggerEvent(&_perf_tx_perf_cb.end_sync[channel]); + } + while (NPS_E_OK == osal_isRunThread()); + osal_exitRunThread(); +} + +static void +_perf_txDeinit( + const UI32_T unit, + const UI32_T tx_channel) +{ + UI32_T channel = 0; + + for (channel = 0; channel < tx_channel; channel++) + { + /* destroy Tx resources */ + osal_stopThread (&_perf_tx_perf_cb.tx_task [channel]); + osal_triggerEvent(&_perf_tx_perf_cb.start_sync [channel]); + osal_destroyThread(&_perf_tx_perf_cb.tx_task [channel]); + osal_destroyEvent(&_perf_tx_perf_cb.end_sync [channel]); + osal_destroyEvent(&_perf_tx_perf_cb.start_sync [channel]); + } +} + +static void +_perf_txInit( + const UI32_T unit, + const UI32_T tx_channel, + const UI32_T len, + BOOL_T test_skb) +{ + UI32_T channel = 0; + + for (channel = 0; channel < tx_channel; channel++) + { + _perf_tx_perf_cb.send_ok [channel] = 0; + _perf_tx_perf_cb.send_fail[channel] = 0; + + /* create Tx resources */ + osal_createEvent("TX_START", &_perf_tx_perf_cb.start_sync [channel]); + osal_createEvent("TX_END", &_perf_tx_perf_cb.end_sync [channel]); + + _perf_tx_perf_cb.tx_cookie[channel].unit = unit; + _perf_tx_perf_cb.tx_cookie[channel].channel = channel; + _perf_tx_perf_cb.tx_cookie[channel].len = len; + _perf_tx_perf_cb.tx_cookie[channel].num = PERF_TX_PERF_NUM / tx_channel; + _perf_tx_perf_cb.tx_cookie[channel].port = 0; + _perf_tx_perf_cb.tx_cookie[channel].test_skb = test_skb; + + osal_createThread( + "TX_PERF", 64 * 1024, 90, + _perf_txTask, + (void *)&_perf_tx_perf_cb.tx_cookie[channel], + &_perf_tx_perf_cb.tx_task[channel]); + } +} + +static void +_perf_rxDeinit( + const UI32_T unit, + const UI32_T rx_channel) +{ + /* turn-off Rx test */ + _perf_rx_perf_cb.rx_test = FALSE; + + /* destroy Rx resources */ + osal_destroyEvent(&_perf_rx_perf_cb.end_sync); + osal_destroyEvent(&_perf_rx_perf_cb.start_sync); + + /* disable duplicate Rx packets to channels */ + _perf_duplicateRxPacket(unit, rx_channel, FALSE); +} + +static void +_perf_rxInit( + const UI32_T unit, + const UI32_T rx_channel, + const UI32_T len) +{ + /* enable duplicate Rx packets to channels */ + _perf_duplicateRxPacket(unit, rx_channel, TRUE); + + /* create Rx callback resources */ + _perf_rx_perf_cb.target_num = PERF_RX_PERF_NUM; + 
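/* expected totals for this run: perf_rxCallback() checks each Rx GPD
+ * against target_len and signals start/end against target_num */
+ 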
_perf_rx_perf_cb.target_len = len;
+ _perf_rx_perf_cb.recv_pass = 0;
+
+ osal_createEvent("RX_START", &_perf_rx_perf_cb.start_sync);
+ osal_createEvent("RX_END", &_perf_rx_perf_cb.end_sync);
+
+ /* turn-on Rx test */
+ _perf_rx_perf_cb.rx_test = TRUE;
+}
+
+/* FUNCTION NAME: perf_rxCallback
+ * PURPOSE:
+ * To count the received Rx GPDs for the Rx test.
+ * INPUT:
+ * len -- Used to check whether the Rx GPD length equals the test length.
+ * OUTPUT:
+ * None
+ * RETURN:
+ * NPS_E_OK -- Successful operation.
+ * NOTES:
+ * None
+ */
+NPS_ERROR_NO_T
+perf_rxCallback(
+ const UI32_T len)
+{
+ /* check length */
+ if (len == _perf_rx_perf_cb.target_len)
+ {
+ _perf_rx_perf_cb.recv_pass++;
+ }
+ else
+ {
+ if (_perf_rx_perf_cb.recv_fail++ >= PERF_RX_PERF_FAIL)
+ {
+ _perf_rx_perf_cb.recv_fail = 0;
+ }
+ }
+
+ /* send signals */
+ if (0 == _perf_rx_perf_cb.recv_pass)
+ {
+ ; /* do nothing */
+ }
+ else if (1 == _perf_rx_perf_cb.recv_pass)
+ {
+ osal_triggerEvent(&_perf_rx_perf_cb.start_sync);
+ }
+ else if (_perf_rx_perf_cb.recv_pass == _perf_rx_perf_cb.target_num)
+ {
+ osal_triggerEvent(&_perf_rx_perf_cb.end_sync);
+ }
+ else if (0 == (_perf_rx_perf_cb.recv_pass % PERF_RX_PERF_MESG))
+ {
+ printk("R");
+ }
+
+ return (NPS_E_OK);
+}
+
+/* FUNCTION NAME: perf_rxTest
+ * PURPOSE:
+ * To check whether the Rx test is running.
+ * INPUT:
+ * None
+ * OUTPUT:
+ * None
+ * RETURN:
+ * NPS_E_OK -- Successful operation.
+ * NOTES:
+ * None
+ */
+NPS_ERROR_NO_T
+perf_rxTest(void)
+{
+ if (FALSE == _perf_rx_perf_cb.rx_test)
+ {
+ return (NPS_E_OTHERS);
+ }
+
+ return (NPS_E_OK);
+}
+
+/* FUNCTION NAME: perf_test
+ * PURPOSE:
+ * To run the Tx test and/or the Rx test.
+ * INPUT:
+ * len -- Test packet length
+ * tx_channel -- Number of Tx channels to test
+ * rx_channel -- Number of Rx channels to test
+ * test_skb -- Test the SKB path (TRUE) or the GPD path (FALSE)
+ * OUTPUT:
+ * None
+ * RETURN:
+ * NPS_E_OK -- Successful operation.
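+ * EXAMPLE (illustrative):
+ *      perf_test(64, 1, 0, FALSE) -- Tx-only: send 64-byte packets on one Tx channel via the GPD path.
+ *      perf_test(64, 0, 1, FALSE) -- Rx-only: count received 64-byte GPDs.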
+ * NOTES: + * None + */ +NPS_ERROR_NO_T +perf_test( + UI32_T len, + UI32_T tx_channel, + UI32_T rx_channel, + BOOL_T test_skb) +{ + NPS_ERROR_NO_T rc = NPS_E_OK; + NPS_TIME_T start_time; + NPS_TIME_T end_time; + UI32_T unit = 0, channel = 0; + UI32_T tx_pkt_cnt = 0, tx_start_intr = 0, tx_end_intr = 0; + UI32_T rx_pkt_cnt = 0, rx_start_intr = 0, rx_end_intr = 0; + + if ((0 == tx_channel) && (0 == rx_channel)) + { + return (NPS_E_NOT_SUPPORT); + } + + /* start test */ + if ((tx_channel > 0) && (rx_channel > 0)) + { + _perf_getIntrCnt(unit, PERF_DIR_TX, &tx_start_intr); + _perf_getIntrCnt(unit, PERF_DIR_RX, &rx_start_intr); + _perf_txInit(unit, tx_channel, len, test_skb); + _perf_rxInit(unit, rx_channel, len); + + /* wait 1st Rx GPD done */ + osal_waitEvent(&_perf_rx_perf_cb.start_sync); + + /* ------------- in-time ------------- */ + osal_getTime(&start_time); + for (channel = 0; channel < tx_channel; channel++) + { + osal_triggerEvent(&_perf_tx_perf_cb.start_sync[channel]); + } + for (channel = 0; channel < tx_channel; channel++) + { + osal_waitEvent(&_perf_tx_perf_cb.end_sync[channel]); + tx_pkt_cnt += _perf_tx_perf_cb.send_ok[channel]; + } + rx_pkt_cnt = _perf_rx_perf_cb.recv_pass; + osal_getTime(&end_time); + /* ------------- in-time ------------- */ + + _perf_txDeinit(unit, tx_channel); + _perf_rxDeinit(unit, rx_channel); + _perf_getIntrCnt(unit, PERF_DIR_TX, &tx_end_intr); + _perf_getIntrCnt(unit, PERF_DIR_RX, &rx_end_intr); + + _perf_showPerf(PERF_DIR_TX, + tx_channel, len, tx_pkt_cnt, tx_end_intr - tx_start_intr, end_time - start_time); + + _perf_showPerf(PERF_DIR_RX, + rx_channel, len, rx_pkt_cnt, rx_end_intr - rx_start_intr, end_time - start_time); + } + else if (tx_channel > 0) + { + _perf_getIntrCnt(unit, PERF_DIR_TX, &tx_start_intr); + _perf_txInit(unit, tx_channel, len, test_skb); + + /* ------------- in-time ------------- */ + osal_getTime(&start_time); + for (channel = 0; channel < tx_channel; channel++) + { + osal_triggerEvent(&_perf_tx_perf_cb.start_sync[channel]); + } + for (channel = 0; channel < tx_channel; channel++) + { + osal_waitEvent(&_perf_tx_perf_cb.end_sync[channel]); + tx_pkt_cnt += _perf_tx_perf_cb.send_ok[channel]; + } + osal_getTime(&end_time); + /* ------------- in-time ------------- */ + + _perf_txDeinit(unit, tx_channel); + _perf_getIntrCnt(unit, PERF_DIR_TX, &tx_end_intr); + + _perf_showPerf(PERF_DIR_TX, + tx_channel, len, tx_pkt_cnt, tx_end_intr - tx_start_intr, end_time - start_time); + } + else if (rx_channel > 0) + { + _perf_getIntrCnt(unit, PERF_DIR_RX, &rx_start_intr); + _perf_rxInit(unit, rx_channel, len); + + /* wait 1st Rx GPD done */ + osal_waitEvent(&_perf_rx_perf_cb.start_sync); + + /* ------------- in-time ------------- */ + osal_getTime(&start_time); + osal_waitEvent(&_perf_rx_perf_cb.end_sync); + osal_getTime(&end_time); + /* ------------- in-time ------------- */ + + _perf_rxDeinit(unit, rx_channel); + _perf_getIntrCnt(unit, PERF_DIR_RX, &rx_end_intr); + + _perf_showPerf(PERF_DIR_RX, + rx_channel, len, PERF_RX_PERF_NUM, rx_end_intr - rx_start_intr, end_time - start_time); + } + + return (rc); +} + diff --git a/platform/nephos/nephos-modules/modules/src/osal_isymbol.c b/platform/nephos/nephos-modules/modules/src/osal_isymbol.c new file mode 100755 index 000000000000..c23cc70bed23 --- /dev/null +++ b/platform/nephos/nephos-modules/modules/src/osal_isymbol.c @@ -0,0 +1,38 @@ +/* Copyright (C) 2019 Nephos, Inc. 
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program.
+ */
+
+/* FILE NAME: osal_isymbol.c
+ * PURPOSE:
+ * It provides global OSAL symbol exports for the Linux kernel module.
+ * NOTES:
+ */
+#include
+#include
+
+/* ----------------------------------------------------- */
+#include
+/* dma */
+extern struct pci_dev *_ptr_ext_pci_dev;
+EXPORT_SYMBOL(_ptr_ext_pci_dev);
+
+#if defined(NPS_LINUX_USER_MODE)
+EXPORT_SYMBOL(osal_mdc_readPciReg);
+EXPORT_SYMBOL(osal_mdc_writePciReg);
+#if defined(NPS_EN_NETIF)
+/* intr */
+/* for the kernel module, this API is exported by script together with the other OSAL functions in osal_symbol.c */
+EXPORT_SYMBOL(osal_mdc_registerIsr);
+#endif
+#endif
diff --git a/platform/nephos/nephos-modules/modules/src/osal_mdc.c b/platform/nephos/nephos-modules/modules/src/osal_mdc.c
new file mode 100755
index 000000000000..3dad3173ac79
--- /dev/null
+++ b/platform/nephos/nephos-modules/modules/src/osal_mdc.c
@@ -0,0 +1,2359 @@
+/* Copyright (C) 2019 Nephos, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program.
+ */
+
+/* FILE NAME: osal_mdc.c
+ * PURPOSE:
+ * 1. 
Provide device operations for the AML interface
+ * NOTES:
+ *
+ */
+
+/* INCLUDE FILE DECLARATIONS
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+#include
+#include
+#include
+#include
+
+#if defined(NPS_LINUX_USER_MODE)
+#include
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)
+#include
+#else
+#include
+#endif
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#endif
+
+/* #define OSAL_MDC_EN_MSI */
+/* #define OSAL_MDC_DMA_RESERVED_MEM_CACHEABLE */
+/* #define OSAL_MDC_EN_TEST */
+
+/* NAMING CONSTANT DECLARATIONS
+ */
+#define OSAL_MDC_PCI_BAR0_OFFSET (0x0)
+#define OSAL_MDC_ERR printk
+
+/* MACRO FUNCTION DECLARATIONS
+ */
+
+/* DATA TYPE DECLARATIONS
+ */
+typedef struct
+{
+ UI32_T unit;
+ struct pci_dev *ptr_pci_dev;
+ UI32_T *ptr_mmio_virt_addr;
+ int irq;
+ AML_DEV_ISR_FUNC_T isr_callback;
+ void *ptr_isr_data;
+
+} OSAL_MDC_DEV_T;
+
+typedef struct
+{
+ OSAL_MDC_DEV_T dev[NPS_CFG_MAXIMUM_CHIPS_PER_SYSTEM];
+ UI32_T dev_num;
+ OSAL_MDC_DMA_INFO_T dma_info;
+
+} OSAL_MDC_CB_T;
+
+#if defined(NPS_LINUX_USER_MODE)
+
+typedef struct
+{
+ OSAL_MDC_IOCTL_CALLBACK_FUNC_T callback[OSAL_MDC_IOCTL_TYPE_LAST];
+
+} OSAL_MDC_IOCTL_CB_T;
+
+#if !defined(NPS_EN_DMA_RESERVED)
+typedef struct
+{
+ NPS_ADDR_T phy_addr;
+ UI32_T size;
+ struct list_head list;
+
+} OSAL_MDC_USER_MODE_DMA_NODE_T;
+#endif
+
+#endif
+
+
+#if defined(NPS_LINUX_KERNEL_MODE)
+
+/* re-define the interface to align OSAL_MDC's implementation with the prototype of CMLIB */
+#define osal_mdc_list_create(__capa__, __type__, __name__, __list__) _osal_mdc_list_create(__capa__, __list__)
+#define osal_mdc_list_destroy(__list__, __callback__) _osal_mdc_list_destroy(__list__)
+#define osal_mdc_list_getNodeData(__list__, __node__, __data__) _osal_mdc_list_getNodeData(__list__, __node__, __data__)
+#define osal_mdc_list_next(__list__, __node__, __next__) _osal_mdc_list_next(__list__, __node__, __next__)
+#define osal_mdc_list_locateHead(__list__, __node__) _osal_mdc_list_locateHead(__list__, __node__)
+#define osal_mdc_list_insertToHead(__list__, __data__) _osal_mdc_list_insertToHead(__list__, __data__)
+#define osal_mdc_list_deleteByData(__list__, __data__) _osal_mdc_list_deleteByData(__list__, __data__)
+
+#if defined(NPS_EN_DMA_RESERVED)
+#define osal_mdc_list_insertBefore(__list__, __node__, __data__) _osal_mdc_list_insertBefore(__list__, __node__, __data__)
+#define osal_mdc_list_prev(__list__, __node__, __prev__) _osal_mdc_list_prev(__list__, __node__, __prev__)
+#endif
+
+#define OSAL_MDC_LIST_TYPE_DOUBLE (1) /* don't care the type, always be double */
+#define OSAL_MDC_LIST_TYPE_SINGLE (0) /* don't care the type, always be double */
+
+static NPS_ERROR_NO_T
+_osal_mdc_list_create(
+ const UI32_T capacity,
+ OSAL_MDC_LIST_T **pptr_list )
+{
+ NPS_ERROR_NO_T rc = NPS_E_NO_MEMORY;
+
+ *pptr_list = NULL;
+
+ *pptr_list = osal_alloc(sizeof(OSAL_MDC_LIST_T));
+ if (NULL != *pptr_list)
+ {
+ (*pptr_list)->capacity = capacity;
+ (*pptr_list)->node_cnt = 0;
+ (*pptr_list)->ptr_head_node = NULL;
+ (*pptr_list)->ptr_tail_node = NULL;
+ rc = NPS_E_OK;
+ }
+
+ return (rc);
+}
+
+static NPS_ERROR_NO_T
+_osal_mdc_list_destroy(
+ OSAL_MDC_LIST_T *ptr_list )
+{
+ OSAL_MDC_LIST_NODE_T *ptr_cur_node, *ptr_next_node;
+ NPS_ERROR_NO_T rc = NPS_E_OTHERS;
+
+ if (NULL != ptr_list)
+ {
+ if (ptr_list->node_cnt != 0)
+ {
+ OSAL_MDC_ERR("dma list not empty, node num=%d\n",
+ ptr_list->node_cnt);
+ ptr_cur_node 
= ptr_list->ptr_head_node; + while(NULL != ptr_cur_node) + { + ptr_next_node = ptr_cur_node->ptr_next; + osal_free(ptr_cur_node); + ptr_list->node_cnt--; + ptr_cur_node = ptr_next_node; + } + } + + osal_free(ptr_list); + rc = NPS_E_OK; + } + + return (rc); +} + +static NPS_ERROR_NO_T +_osal_mdc_list_getNodeData( + OSAL_MDC_LIST_T *ptr_list, + OSAL_MDC_LIST_NODE_T *ptr_node, + void **pptr_node_data ) +{ + NPS_ERROR_NO_T rc = NPS_E_OTHERS; + + if (NULL != ptr_list) + { + if (NULL != ptr_node) + { + *pptr_node_data = ptr_node->ptr_data; + rc = NPS_E_OK; + } + } + + return (rc); +} + +static NPS_ERROR_NO_T +_osal_mdc_list_insertToHead( + OSAL_MDC_LIST_T *ptr_list, + void *ptr_data ) +{ + OSAL_MDC_LIST_NODE_T *ptr_new_node = NULL; + NPS_ERROR_NO_T rc = NPS_E_OTHERS; + + if (NULL != ptr_list) + { + ptr_new_node = osal_alloc(sizeof(OSAL_MDC_LIST_NODE_T)); + if (NULL != ptr_new_node) + { + ptr_new_node->ptr_data = ptr_data; + + /* no former node */ + ptr_new_node->ptr_prev = NULL; + + if (NULL != ptr_list->ptr_head_node) + { + ptr_list->ptr_head_node->ptr_prev = ptr_new_node; + ptr_new_node->ptr_next = ptr_list->ptr_head_node; + } + else + { + /* 1st node insertion */ + ptr_list->ptr_tail_node = ptr_new_node; + ptr_new_node->ptr_next = NULL; + } + + ptr_list->ptr_head_node = ptr_new_node; + ptr_list->node_cnt++; + rc = NPS_E_OK; + } + } + + return (rc); +} + +#if defined(NPS_EN_DMA_RESERVED) +static NPS_ERROR_NO_T +_osal_mdc_list_insertBefore( + OSAL_MDC_LIST_T *ptr_list, + OSAL_MDC_LIST_NODE_T *ptr_node, + void *ptr_data ) +{ + OSAL_MDC_LIST_NODE_T *ptr_new_node = NULL; + OSAL_MDC_LIST_NODE_T *ptr_prev_node = ptr_node->ptr_prev; + NPS_ERROR_NO_T rc = NPS_E_OTHERS; + + if (NULL != ptr_list) + { + if (NULL != ptr_node) + { + ptr_new_node = osal_alloc(sizeof(OSAL_MDC_LIST_NODE_T)); + if (NULL != ptr_new_node) + { + ptr_new_node->ptr_data = ptr_data; + + /* location */ + if (NULL != ptr_prev_node) + { + ptr_prev_node->ptr_next = ptr_new_node; + } + ptr_new_node->ptr_prev = ptr_prev_node; + ptr_new_node->ptr_next = ptr_node; + ptr_node->ptr_prev = ptr_new_node; + + /* update head if necessary */ + if (ptr_list->ptr_head_node == ptr_node) + { + ptr_list->ptr_head_node = ptr_new_node; + } + + ptr_list->node_cnt++; + rc = NPS_E_OK; + } + } + } + + return (rc); +} +#endif + +static NPS_ERROR_NO_T +_osal_mdc_list_deleteTargetNode( + OSAL_MDC_LIST_T *ptr_list, + OSAL_MDC_LIST_NODE_T *ptr_target_node) +{ + OSAL_MDC_LIST_NODE_T *ptr_prev_node = ptr_target_node->ptr_prev; + OSAL_MDC_LIST_NODE_T *ptr_next_node = ptr_target_node->ptr_next; + + if (ptr_target_node == ptr_list->ptr_head_node) + { + ptr_list->ptr_head_node = ptr_next_node; + if (NULL != ptr_next_node) + { + ptr_next_node->ptr_prev = NULL; + } + else + { + /* there's only 1 node in the list, and it gets removed */ + ptr_list->ptr_tail_node = NULL; + } + } + else if (ptr_target_node == ptr_list->ptr_tail_node) + { + /* at least 2 nodes in the list, and the target node locates tail */ + ptr_list->ptr_tail_node = ptr_prev_node; + ptr_prev_node->ptr_next = NULL; + } + else + { + /* intermediate node */ + ptr_prev_node->ptr_next = ptr_next_node; + ptr_next_node->ptr_prev = ptr_prev_node; + } + + osal_free(ptr_target_node); + ptr_list->node_cnt--; + + return (NPS_E_OK); +} + +static NPS_ERROR_NO_T +_osal_mdc_list_deleteByData( + OSAL_MDC_LIST_T *ptr_list, + void *ptr_delete_data) +{ + OSAL_MDC_LIST_NODE_T *ptr_tmp_node; + NPS_ERROR_NO_T rc = NPS_E_OTHERS; + + if (NULL != ptr_list) + { + ptr_tmp_node = ptr_list->ptr_head_node; + while (NULL != 
ptr_tmp_node) + { + if (ptr_tmp_node->ptr_data == ptr_delete_data) + { + rc = _osal_mdc_list_deleteTargetNode(ptr_list, ptr_tmp_node); + break; + } + else + { + ptr_tmp_node = ptr_tmp_node->ptr_next; + } + } + } + + return (rc); +} + + +static NPS_ERROR_NO_T +_osal_mdc_list_locateHead( + OSAL_MDC_LIST_T *ptr_list, + OSAL_MDC_LIST_NODE_T **pptr_node ) +{ + NPS_ERROR_NO_T rc = NPS_E_OTHERS; + + if (NULL != ptr_list) + { + *pptr_node = ptr_list->ptr_head_node; + if (NULL != *pptr_node) + { + rc = NPS_E_OK; + } + } + + return (rc); +} + +static NPS_ERROR_NO_T +_osal_mdc_list_next( + OSAL_MDC_LIST_T *ptr_list, + OSAL_MDC_LIST_NODE_T *ptr_node, + OSAL_MDC_LIST_NODE_T **pptr_next_node ) +{ + NPS_ERROR_NO_T rc = NPS_E_OTHERS; + + if (NULL != ptr_list) + { + if (NULL != ptr_node) + { + *pptr_next_node = ptr_node->ptr_next; + if (NULL != *pptr_next_node) + { + rc = NPS_E_OK; + } + } + } + + return (rc); +} + +#if defined(NPS_EN_DMA_RESERVED) +static NPS_ERROR_NO_T +_osal_mdc_list_prev( + OSAL_MDC_LIST_T *ptr_list, + OSAL_MDC_LIST_NODE_T *ptr_node, + OSAL_MDC_LIST_NODE_T **pptr_prev_node ) +{ + NPS_ERROR_NO_T rc = NPS_E_OTHERS; + + if (NULL != ptr_list) + { + if (NULL != ptr_node) + { + *pptr_prev_node = ptr_node->ptr_prev; + if (NULL != *pptr_prev_node) + { + rc = NPS_E_OK; + } + } + } + + return (rc); +} +#endif /* End of defined(NPS_EN_DMA_RESERVED) */ + +#endif /* End if defined(NPS_LINUX_KERNEL_MODE) */ + +/* GLOBAL VARIABLE DECLARATIONS + */ +static OSAL_MDC_CB_T _osal_mdc_cb; + +/* To let system callback function to access AML database */ +static AML_DEV_T *_ptr_osal_mdc_dev; + +/* Interface */ +struct pci_dev *_ptr_ext_pci_dev; + + +/* STATIC VARIABLE DECLARATIONS + */ +/* --------------------------------------------------------------------------- I2C interface */ +#if defined(AML_EN_I2C) +extern NPS_ERROR_NO_T +dev_switch_readBuffer( + const UI32_T addr, + const UI32_T addr_len, + UI8_T *ptr_buf, + const UI32_T buf_len); + +extern NPS_ERROR_NO_T +dev_switch_writeBuffer( + const UI32_T addr, + const UI32_T addr_len, + const UI8_T *ptr_buf, + const UI32_T buf_len); + +static NPS_ERROR_NO_T +_osal_mdc_readI2cReg( + const UI32_T unit, + const UI32_T offset, + UI32_T *ptr_data, + const UI32_T len) +{ + return dev_switch_readBuffer(offset, sizeof(offset), (UI8_T *)ptr_data, len); +} + +static NPS_ERROR_NO_T +_osal_mdc_writeI2cReg( + const UI32_T unit, + const UI32_T offset, + const UI32_T *ptr_data, + const UI32_T len) +{ + return dev_switch_writeBuffer(offset, sizeof(offset), (UI8_T *)ptr_data, len); +} + +static NPS_ERROR_NO_T +_osal_mdc_probeI2cDevice(void) +{ + /* I2C interface will be probed in BSP. */ + _ptr_osal_mdc_dev->if_type = AML_DEV_TYPE_I2C; + _ptr_osal_mdc_dev->access.read_callback = _osal_mdc_readI2cReg; + _ptr_osal_mdc_dev->access.write_callback = _osal_mdc_writeI2cReg; + + _ptr_osal_mdc_dev->id.device = HAL_DEVICE_ID_MT3258; + _ptr_osal_mdc_dev->id.vendor = HAL_MTK_VENDOR_ID; + _ptr_osal_mdc_dev->id.revision = HAL_REVISION_ID_E2; + + return (NPS_E_OK); +} + +static NPS_ERROR_NO_T +_osal_mdc_removeI2cDevice(void) +{ + /* I2C interface will be removed in BSP. 
*/
+ return (NPS_E_OK);
+}
+
+/* --------------------------------------------------------------------------- PCI interface */
+#else
+
+static NPS_ERROR_NO_T
+_osal_mdc_getPciMmioInfo(
+ struct pci_dev *pdev,
+ UI32_T **pptr_base_addr)
+{
+ NPS_ERROR_NO_T rc = NPS_E_OTHERS;
+ NPS_ADDR_T phy_addr;
+ UI32_T reg_space_sz;
+
+ phy_addr = pci_resource_start(pdev, OSAL_MDC_PCI_BAR0_OFFSET);
+ reg_space_sz = pci_resource_len(pdev, OSAL_MDC_PCI_BAR0_OFFSET);
+
+ if (0 == pci_request_region(pdev, OSAL_MDC_PCI_BAR0_OFFSET, OSAL_MDC_DRIVER_NAME))
+ {
+ *pptr_base_addr = ioremap_nocache(phy_addr, reg_space_sz);
+ if (NULL != *pptr_base_addr)
+ {
+ rc = NPS_E_OK;
+ }
+ }
+ return (rc);
+}
+
+NPS_ERROR_NO_T
+osal_mdc_readPciReg(
+ const UI32_T unit,
+ const UI32_T offset,
+ UI32_T *ptr_data,
+ const UI32_T len)
+{
+ NPS_ERROR_NO_T rc = NPS_E_OK;
+ UI32_T idx;
+ UI32_T count;
+ volatile UI32_T *ptr_base_addr = _osal_mdc_cb.dev[unit].ptr_mmio_virt_addr;
+
+ if (NULL != ptr_base_addr)
+ {
+ if (OSAL_MDC_PCI_BUS_WIDTH == len)
+ {
+ *ptr_data = *((UI32_T *)((NPS_HUGE_T)ptr_base_addr + offset));
+ }
+ else
+ {
+ if (0 == (len % OSAL_MDC_PCI_BUS_WIDTH))
+ {
+ count = len / OSAL_MDC_PCI_BUS_WIDTH;
+ for (idx = 0; idx < count; idx++)
+ {
+ *(ptr_data + idx) = *((UI32_T *)((NPS_HUGE_T)ptr_base_addr + offset + idx * 4));
+ }
+ }
+ else
+ {
+ rc = NPS_E_OTHERS;
+ }
+ }
+ }
+ else
+ {
+ rc = NPS_E_NOT_INITED;
+ }
+
+ return (rc);
+}
+
+NPS_ERROR_NO_T
+osal_mdc_writePciReg(
+ const UI32_T unit,
+ const UI32_T offset,
+ const UI32_T *ptr_data,
+ const UI32_T len)
+{
+ UI32_T idx;
+ UI32_T count;
+ volatile UI32_T *ptr_base_addr = _osal_mdc_cb.dev[unit].ptr_mmio_virt_addr;
+ NPS_ERROR_NO_T rc = NPS_E_OK;
+
+ if (NULL != ptr_base_addr)
+ {
+ if (OSAL_MDC_PCI_BUS_WIDTH == len)
+ {
+ *((UI32_T *)((NPS_HUGE_T)ptr_base_addr + offset)) = *ptr_data;
+ }
+ else
+ {
+ if (0 == (len % OSAL_MDC_PCI_BUS_WIDTH))
+ {
+ count = len / OSAL_MDC_PCI_BUS_WIDTH;
+ for (idx = 0; idx < count; idx++)
+ {
+ *((UI32_T *)((NPS_HUGE_T)ptr_base_addr + offset + idx * 4)) = *(ptr_data + idx);
+ }
+ }
+ else
+ {
+ rc = NPS_E_OTHERS;
+ }
+ }
+ }
+ else
+ {
+ rc = NPS_E_NOT_INITED;
+ }
+
+ return (rc);
+}
+
+static int
+_osal_mdc_probePciCallback(
+ struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int linux_rc;
+ UI16_T device_id;
+ UI16_T vendor_id;
+ UI8_T revision_id;
+ NPS_ERROR_NO_T rc = NPS_E_OK;
+
+ linux_rc = pci_enable_device(pdev);
+ if (0 == linux_rc)
+ {
+ _ptr_osal_mdc_dev->if_type = AML_DEV_TYPE_PCI;
+
+ pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
+ pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor_id);
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &revision_id);
+
+ _ptr_osal_mdc_dev->id.device = (UI32_T)device_id;
+ _ptr_osal_mdc_dev->id.vendor = (UI32_T)vendor_id;
+ _ptr_osal_mdc_dev->id.revision = (UI32_T)revision_id;
+
+#if defined(NPS_LINUX_KERNEL_MODE)
+ _ptr_osal_mdc_dev->access.read_callback = osal_mdc_readPciReg;
+ _ptr_osal_mdc_dev->access.write_callback = osal_mdc_writePciReg;
+#endif
+
+ rc = _osal_mdc_getPciMmioInfo(pdev, &_osal_mdc_cb.dev[_osal_mdc_cb.dev_num].ptr_mmio_virt_addr);
+ if (NPS_E_OK == rc)
+ {
+ /* Save the database to the pdev structure so that the system callback can release resources,
+ * such as disconnecting the ISR etc.
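+ * (The pointer stored via pci_set_drvdata() below is read back with
+ * pci_get_drvdata() in _osal_mdc_removePciCallback().)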
+ */ + _osal_mdc_cb.dev[_osal_mdc_cb.dev_num].irq = pdev->irq; + _osal_mdc_cb.dev[_osal_mdc_cb.dev_num].ptr_pci_dev = pdev; + _osal_mdc_cb.dev[_osal_mdc_cb.dev_num].unit = _osal_mdc_cb.dev_num; + + pci_set_drvdata(pdev, &_osal_mdc_cb.dev[_osal_mdc_cb.dev_num]); + + /* To set the bus master bit on device to enable the DMA transaction from PCIe EP to RC + * The bus master bit gets cleared when pci_disable_device() is called + */ + pci_set_master(pdev); + +#if !defined(NPS_EN_DMA_RESERVED) + if (NULL == _osal_mdc_cb.dma_info.ptr_dma_dev) + { + /* This variable is for dma_alloc_coherent */ + _osal_mdc_cb.dma_info.ptr_dma_dev = &pdev->dev; + } +#endif + _osal_mdc_cb.dev_num++; + _ptr_osal_mdc_dev++; + } + } + else + { + OSAL_MDC_ERR("enable pci dev failed, linux_rc=%d\n", linux_rc); + } + + return (0); +} + +static void +_osal_mdc_removePciCallback( + struct pci_dev *pdev) +{ + OSAL_MDC_DEV_T *ptr_dev = (OSAL_MDC_DEV_T *)pci_get_drvdata(pdev); + + iounmap(ptr_dev->ptr_mmio_virt_addr); + pci_release_region(pdev, OSAL_MDC_PCI_BAR0_OFFSET); + pci_disable_device(pdev); +} + +static struct pci_device_id _osal_mdc_id_table[] = +{ + {PCI_DEVICE(HAL_MTK_VENDOR_ID, PCI_ANY_ID)}, + {PCI_DEVICE(HAL_NP_VENDOR_ID, PCI_ANY_ID)}, +}; + +static struct pci_driver _osal_mdc_pci_driver = +{ + .name = OSAL_MDC_DRIVER_NAME, + .id_table = _osal_mdc_id_table, + .probe = _osal_mdc_probePciCallback, + .remove = _osal_mdc_removePciCallback, +}; + +static NPS_ERROR_NO_T +_osal_mdc_probePciDevice(void) +{ + NPS_ERROR_NO_T rc = NPS_E_OK; + + if (pci_register_driver(&_osal_mdc_pci_driver) < 0) + { + rc = NPS_E_OTHERS; + } + return (rc); +} + +static NPS_ERROR_NO_T +_osal_mdc_removePciDevice(void) +{ + pci_unregister_driver(&_osal_mdc_pci_driver); + return (NPS_E_OK); +} + +#endif /* End of AML_EN_I2C */ + +/* --------------------------------------------------------------------------- DMA */ +#if defined(NPS_LINUX_KERNEL_MODE) + +static NPS_ERROR_NO_T +_osal_mdc_searchDmaVirtAddr( + OSAL_MDC_LIST_T *ptr_dma_list, + const void *ptr_virt_addr, + OSAL_MDC_LIST_NODE_T **pptr_node, + OSAL_MDC_DMA_NODE_T **pptr_node_data) +{ + OSAL_MDC_LIST_NODE_T *ptr_curr_node; + OSAL_MDC_DMA_NODE_T *ptr_curr_node_data; + NPS_ERROR_NO_T rc; + + rc = osal_mdc_list_locateHead(ptr_dma_list, &ptr_curr_node); + while (NPS_E_OK == rc) + { + rc = osal_mdc_list_getNodeData(ptr_dma_list, ptr_curr_node, (void **)&ptr_curr_node_data); + if (NPS_E_OK == rc) + { + if (ptr_curr_node_data->ptr_virt_addr == ptr_virt_addr) + { + *pptr_node = ptr_curr_node; + *pptr_node_data = ptr_curr_node_data; + break; + } + rc = osal_mdc_list_next(ptr_dma_list, ptr_curr_node, &ptr_curr_node); + } + } + return (rc); +} + +static NPS_ERROR_NO_T +_osal_mdc_destroyDmaNodeList( + OSAL_MDC_DMA_INFO_T *ptr_dma_info) +{ + OSAL_MDC_LIST_T *ptr_dma_list = (OSAL_MDC_LIST_T *)ptr_dma_info->ptr_dma_list; + OSAL_MDC_LIST_NODE_T *ptr_curr_node = NULL; + OSAL_MDC_DMA_NODE_T *ptr_curr_node_data = NULL; + NPS_ERROR_NO_T rc = NPS_E_NOT_INITED; + + if (NULL != ptr_dma_list) + { + rc = osal_mdc_list_locateHead(ptr_dma_list, &ptr_curr_node); + while (NPS_E_OK == rc) + { + rc = osal_mdc_list_getNodeData(ptr_dma_list, ptr_curr_node, (void **)&ptr_curr_node_data); + if ((NPS_E_OK == rc) && (NULL != ptr_curr_node_data)) + { + rc = osal_mdc_list_deleteByData(ptr_dma_list, ptr_curr_node_data); + if (NPS_E_OK == rc) + { + kfree(ptr_curr_node_data); + } + } + rc = osal_mdc_list_locateHead(ptr_dma_list, &ptr_curr_node); + } + rc = osal_mdc_list_destroy(ptr_dma_list, NULL); + if (NPS_E_OK == rc) + { + 
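/* the list container itself has been freed; clear the stale pointer */
+ 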
ptr_dma_info->ptr_dma_list = NULL; + } + } + return (rc); +} + +#endif /* End of NPS_LINUX_KERNEL_MODE */ + +#if defined(NPS_EN_DMA_RESERVED) + +#if defined(NPS_LINUX_KERNEL_MODE) + +#if defined(OSAL_MDC_EN_TEST) +static NPS_ERROR_NO_T +_osal_mdc_dumpRsrvDmaList(void) +{ + OSAL_MDC_DMA_INFO_T *ptr_dma_info = &_osal_mdc_cb.dma_info; + OSAL_MDC_LIST_NODE_T *ptr_curr_node; + OSAL_MDC_DMA_NODE_T *ptr_curr_node_data; + UI32_T node = 0; + NPS_ERROR_NO_T rc = NPS_E_OK; + + rc = osal_mdc_list_locateHead((OSAL_MDC_LIST_T *)ptr_dma_info->ptr_dma_list, + &ptr_curr_node); + while (NPS_E_OK == rc) + { + rc = osal_mdc_list_getNodeData((OSAL_MDC_LIST_T *)ptr_dma_info->ptr_dma_list, + ptr_curr_node, (void **)&ptr_curr_node_data); + if (NPS_E_OK == rc) + { + OSAL_MDC_ERR( + "node %d. virt addr=%p, phy addr=%p, size=%d, avbl=%d\n", node, + ptr_curr_node_data->ptr_virt_addr, ptr_curr_node_data->phy_addr, + ptr_curr_node_data->size, ptr_curr_node_data->available); + } + rc = osal_mdc_list_next((OSAL_MDC_LIST_T *)ptr_dma_info->ptr_dma_list, + ptr_curr_node, &ptr_curr_node); + node++; + } + return (rc); +} +#endif + +static NPS_ERROR_NO_T +_osal_mdc_createRsrvDmaNodeList( + OSAL_MDC_DMA_INFO_T *ptr_dma_info) +{ + OSAL_MDC_DMA_NODE_T *ptr_node_data; + NPS_ERROR_NO_T rc; + + rc = osal_mdc_list_create(OSAL_MDC_DMA_LIST_SZ_UNLIMITED, + OSAL_MDC_LIST_TYPE_DOUBLE, + OSAL_MDC_DMA_LIST_NAME, + (OSAL_MDC_LIST_T **)&ptr_dma_info->ptr_dma_list); + if (NPS_E_OK == rc) + { + /* The first node, which contains all of the reserved memory */ + ptr_node_data = kmalloc(sizeof(OSAL_MDC_DMA_NODE_T), GFP_KERNEL); + if (NULL != ptr_node_data) + { + ptr_node_data->ptr_virt_addr = ptr_dma_info->ptr_rsrv_virt_addr; + ptr_node_data->phy_addr = ptr_dma_info->rsrv_phy_addr; + ptr_node_data->size = ptr_dma_info->rsrv_size; + ptr_node_data->available = TRUE; + rc = osal_mdc_list_insertToHead((OSAL_MDC_LIST_T *)ptr_dma_info->ptr_dma_list, + ptr_node_data); + } + else + { + rc = NPS_E_NO_MEMORY; + } + } + return (rc); +} + +static NPS_ERROR_NO_T +_osal_mdc_searchAvblRsrvDmaNode( + OSAL_MDC_LIST_T *ptr_dma_list, + const UI32_T size, + OSAL_MDC_LIST_NODE_T **pptr_avbl_node) +{ + OSAL_MDC_LIST_NODE_T *ptr_curr_node; + OSAL_MDC_DMA_NODE_T *ptr_curr_node_data; + NPS_ERROR_NO_T rc; + + rc = osal_mdc_list_locateHead(ptr_dma_list, &ptr_curr_node); + while (NPS_E_OK == rc) + { + rc = osal_mdc_list_getNodeData(ptr_dma_list, ptr_curr_node, (void **)&ptr_curr_node_data); + if (NPS_E_OK == rc) + { + if ((TRUE == ptr_curr_node_data->available) && (ptr_curr_node_data->size >= size)) + { + *pptr_avbl_node = ptr_curr_node; + break; + } + } + rc = osal_mdc_list_next(ptr_dma_list, ptr_curr_node, &ptr_curr_node); + } + return (rc); +} + +static NPS_ERROR_NO_T +_osal_mdc_splitRsrvDmaNodes( + OSAL_MDC_LIST_T *ptr_dma_list, + OSAL_MDC_LIST_NODE_T *ptr_ori_node, + const UI32_T size, + OSAL_MDC_DMA_NODE_T **pptr_new_node_data) +{ + OSAL_MDC_DMA_NODE_T *ptr_ori_node_data; + NPS_ERROR_NO_T rc; + + rc = osal_mdc_list_getNodeData(ptr_dma_list, ptr_ori_node, (void **)&ptr_ori_node_data); + + if (NPS_E_OK == rc) + { + *pptr_new_node_data = kmalloc(sizeof(OSAL_MDC_DMA_NODE_T), GFP_KERNEL); + if (NULL == *pptr_new_node_data) + { + return (NPS_E_NO_MEMORY); + } + + /* Create a new node */ + (*pptr_new_node_data)->size = size; + (*pptr_new_node_data)->phy_addr = ptr_ori_node_data->phy_addr; + (*pptr_new_node_data)->ptr_virt_addr = ptr_ori_node_data->ptr_virt_addr; + (*pptr_new_node_data)->available = TRUE; + + /* Update the original node */ + ptr_ori_node_data->size -= size; + ptr_ori_node_data->phy_addr += size; + ptr_ori_node_data->ptr_virt_addr = + (void *)((NPS_HUGE_T)ptr_ori_node_data->ptr_virt_addr + (NPS_HUGE_T)size); + + rc = osal_mdc_list_insertBefore(ptr_dma_list, ptr_ori_node, (void *)*pptr_new_node_data); + if (NPS_E_OK != rc) + { + OSAL_MDC_ERR("insert rsrv dma node to list failed, size=%d, rc=%d\n", size, rc); + /* Recovery */ + ptr_ori_node_data->size += size; + ptr_ori_node_data->phy_addr -= size; + ptr_ori_node_data->ptr_virt_addr = + (void *)((NPS_HUGE_T)ptr_ori_node_data->ptr_virt_addr - (NPS_HUGE_T)size); + kfree(*pptr_new_node_data); + } + } + return (rc); +} + +static void * +_osal_mdc_allocRsrvDmaMem( + OSAL_MDC_DMA_INFO_T *ptr_dma_info, + const UI32_T size) +{ + OSAL_MDC_LIST_T *ptr_dma_list = (OSAL_MDC_LIST_T *)ptr_dma_info->ptr_dma_list; + OSAL_MDC_LIST_NODE_T *ptr_node = NULL; + OSAL_MDC_DMA_NODE_T *ptr_node_data; + OSAL_MDC_DMA_NODE_T *ptr_new_node_data; + void *ptr_virt_addr = NULL; + NPS_ERROR_NO_T rc; + + rc = _osal_mdc_searchAvblRsrvDmaNode(ptr_dma_list, size, &ptr_node); + if (NPS_E_OK == rc) + { + rc = osal_mdc_list_getNodeData(ptr_dma_list, ptr_node, (void **)&ptr_node_data); + if (NPS_E_OK == rc) + { + /* If the node size exactly fits the request, hand this node to the user, */ + if (ptr_node_data->size == size) + { + ptr_node_data->available = FALSE; + ptr_virt_addr = ptr_node_data->ptr_virt_addr; + } + /* or split off a new node of the requested size. */ + else + { + rc = _osal_mdc_splitRsrvDmaNodes(ptr_dma_list, ptr_node, size, &ptr_new_node_data); + if (NPS_E_OK == rc) + { + ptr_new_node_data->available = FALSE; + ptr_virt_addr = ptr_new_node_data->ptr_virt_addr; + } + } + } + } + return (ptr_virt_addr); +} + +static NPS_ERROR_NO_T +_osal_mdc_mergeTwoRsrvDmaNodes( + OSAL_MDC_LIST_T *ptr_dma_list, + OSAL_MDC_DMA_NODE_T *ptr_first_node_data, + OSAL_MDC_DMA_NODE_T *ptr_second_node_data) +{ + ptr_first_node_data->size += ptr_second_node_data->size; + + osal_mdc_list_deleteByData(ptr_dma_list, ptr_second_node_data); + return (NPS_E_OK); +} + +static NPS_ERROR_NO_T +_osal_mdc_mergeRsrvDmaNodes( + OSAL_MDC_LIST_T *ptr_dma_list, + OSAL_MDC_LIST_NODE_T *ptr_curr_node) +{ + OSAL_MDC_LIST_NODE_T *ptr_prev_node; + OSAL_MDC_LIST_NODE_T *ptr_next_node; + OSAL_MDC_DMA_NODE_T *ptr_curr_node_data; + OSAL_MDC_DMA_NODE_T *ptr_prev_node_data; + OSAL_MDC_DMA_NODE_T *ptr_next_node_data; + NPS_ERROR_NO_T rc; + + rc = osal_mdc_list_getNodeData(ptr_dma_list, ptr_curr_node, (void **)&ptr_curr_node_data); + if (NPS_E_OK == rc) + { + /* First, check if the previous node is available */ + rc = osal_mdc_list_prev(ptr_dma_list, ptr_curr_node, &ptr_prev_node); + if (NPS_E_OK == rc) + { + osal_mdc_list_getNodeData(ptr_dma_list, ptr_prev_node, (void **)&ptr_prev_node_data); + if (TRUE == ptr_prev_node_data->available) + { + _osal_mdc_mergeTwoRsrvDmaNodes(ptr_dma_list, ptr_prev_node_data, ptr_curr_node_data); + ptr_curr_node = ptr_prev_node; + ptr_curr_node_data = ptr_prev_node_data; + } + } + + /* Then, check if the next node is available */ + rc = osal_mdc_list_next(ptr_dma_list, ptr_curr_node, &ptr_next_node); + if (NPS_E_OK == rc) + { + rc = osal_mdc_list_getNodeData(ptr_dma_list, ptr_next_node, (void **)&ptr_next_node_data); + if (NPS_E_OK == rc) + { + if (TRUE == ptr_next_node_data->available) + { + _osal_mdc_mergeTwoRsrvDmaNodes(ptr_dma_list, ptr_curr_node_data, ptr_next_node_data); + } + } + } + } + return (rc); +} + +static NPS_ERROR_NO_T +_osal_mdc_freeRsrvDmaMem( + OSAL_MDC_DMA_INFO_T *ptr_dma_info, + void *ptr_virt_addr) +{ + OSAL_MDC_LIST_T *ptr_dma_list = (OSAL_MDC_LIST_T *)ptr_dma_info->ptr_dma_list; + OSAL_MDC_LIST_NODE_T *ptr_node = NULL; + OSAL_MDC_DMA_NODE_T *ptr_node_data = NULL; + NPS_ERROR_NO_T rc; + + rc = _osal_mdc_searchDmaVirtAddr(ptr_dma_list, ptr_virt_addr, &ptr_node, &ptr_node_data); + if (NPS_E_OK == rc) + { + ptr_node_data->available = TRUE; + _osal_mdc_mergeRsrvDmaNodes(ptr_dma_list, ptr_node); + } + return (rc); +} + +#endif /* End of NPS_LINUX_KERNEL_MODE */ + +static NPS_ERROR_NO_T +_osal_mdc_initRsrvDmaMem( + OSAL_MDC_DMA_INFO_T *ptr_dma_info) +{ + struct resource *ptr_res; + NPS_ERROR_NO_T rc = NPS_E_OK; + + ptr_dma_info->rsrv_size = (NPS_ADDR_T)NPS_DMA_RESERVED_SZ * 1024 * 1024; + ptr_dma_info->rsrv_phy_addr = (NPS_ADDR_T)NPS_OS_MEMORY_SZ * 1024 * 1024; + ptr_res = request_mem_region(ptr_dma_info->rsrv_phy_addr, + ptr_dma_info->rsrv_size, "nps_rsrv_mem"); + if (NULL != ptr_res) + { +#if defined(OSAL_MDC_DMA_RESERVED_MEM_CACHEABLE) + ptr_dma_info->ptr_rsrv_virt_addr = ioremap(ptr_dma_info->rsrv_phy_addr, + ptr_dma_info->rsrv_size); +#else + ptr_dma_info->ptr_rsrv_virt_addr = ioremap_nocache(ptr_dma_info->rsrv_phy_addr, + ptr_dma_info->rsrv_size); +#endif + if (NULL == ptr_dma_info->ptr_rsrv_virt_addr) + { +#if defined(NPS_EN_64BIT_ADDR) || defined(NPS_EN_HOST_64_BIT_BIG_ENDIAN) || defined(NPS_EN_HOST_64_BIT_LITTLE_ENDIAN) + OSAL_MDC_ERR( + "ioremap() failed, phy addr=0x%llx, size=%llu\n", + ptr_dma_info->rsrv_phy_addr, ptr_dma_info->rsrv_size); +#else + OSAL_MDC_ERR( + "ioremap() failed, phy addr=0x%x, size=%u\n", + ptr_dma_info->rsrv_phy_addr, ptr_dma_info->rsrv_size); +#endif + rc = NPS_E_OTHERS; + } + } + else + { +#if defined(NPS_EN_64BIT_ADDR) || defined(NPS_EN_HOST_64_BIT_BIG_ENDIAN) || defined(NPS_EN_HOST_64_BIT_LITTLE_ENDIAN) + OSAL_MDC_ERR( + "request_mem_region() failed, phy addr=0x%llx, size=%llu\n", + ptr_dma_info->rsrv_phy_addr, ptr_dma_info->rsrv_size); +#else + OSAL_MDC_ERR( + "request_mem_region() failed, phy addr=0x%x, size=%u\n", + ptr_dma_info->rsrv_phy_addr, ptr_dma_info->rsrv_size); +#endif + rc = NPS_E_OTHERS; + } + return (rc); +} + +static NPS_ERROR_NO_T +_osal_mdc_deinitRsrvDmaMem( + OSAL_MDC_DMA_INFO_T *ptr_dma_info) +{ + if (NULL != ptr_dma_info->ptr_rsrv_virt_addr) + { + iounmap(ptr_dma_info->ptr_rsrv_virt_addr); + } + if (0x0 != ptr_dma_info->rsrv_phy_addr) + { + release_mem_region(ptr_dma_info->rsrv_phy_addr, + ptr_dma_info->rsrv_size); + } + return (NPS_E_OK); +} + + +#else /* Else of NPS_EN_DMA_RESERVED */ + + +#if defined(NPS_LINUX_KERNEL_MODE) + +#if defined(OSAL_MDC_EN_TEST) +static NPS_ERROR_NO_T +_osal_mdc_dumpSysDmaList(void) +{ + OSAL_MDC_DMA_INFO_T *ptr_dma_info = &_osal_mdc_cb.dma_info; + OSAL_MDC_LIST_NODE_T *ptr_curr_node; + OSAL_MDC_DMA_NODE_T *ptr_curr_node_data; + UI32_T node = 0; + NPS_ERROR_NO_T rc = NPS_E_OK; + + rc = osal_mdc_list_locateHead((OSAL_MDC_LIST_T *)ptr_dma_info->ptr_dma_list, + &ptr_curr_node); + while (NPS_E_OK == rc) + { + rc = osal_mdc_list_getNodeData((OSAL_MDC_LIST_T *)ptr_dma_info->ptr_dma_list, + ptr_curr_node, (void **)&ptr_curr_node_data); + if (NPS_E_OK == rc) + { + OSAL_MDC_ERR( + "node %d. virt addr=%p, phy addr=%p, size=%d\n", node, + ptr_curr_node_data->ptr_virt_addr, ptr_curr_node_data->phy_addr, + ptr_curr_node_data->size); + } + + rc = osal_mdc_list_next((OSAL_MDC_LIST_T *)ptr_dma_info->ptr_dma_list, + ptr_curr_node, &ptr_curr_node); + node++; + + } + return (rc); +} +#endif + +static NPS_ERROR_NO_T +_osal_mdc_createSysDmaNodeList( + OSAL_MDC_DMA_INFO_T *ptr_dma_info) +{ + NPS_ERROR_NO_T rc; + + rc = osal_mdc_list_create(OSAL_MDC_DMA_LIST_SZ_UNLIMITED, + OSAL_MDC_LIST_TYPE_SINGLY, + OSAL_MDC_DMA_LIST_NAME, + (OSAL_MDC_LIST_T **)&ptr_dma_info->ptr_dma_list); + return (rc); +} +#if !defined(NPS_LAMP) + +static void * +_osal_mdc_allocSysDmaMem( + OSAL_MDC_DMA_INFO_T *ptr_dma_info, + const UI32_T size) +{ + dma_addr_t phy_addr; + OSAL_MDC_DMA_NODE_T *ptr_node_data; + void *ptr_virt_addr = NULL; + NPS_ERROR_NO_T rc = NPS_E_OK; + + ptr_virt_addr = dma_alloc_coherent(ptr_dma_info->ptr_dma_dev, size, &phy_addr, GFP_ATOMIC); + if (NULL != ptr_virt_addr) + { + ptr_node_data = kmalloc(sizeof(OSAL_MDC_DMA_NODE_T), GFP_KERNEL); + if (NULL == ptr_node_data) + { + dma_free_coherent(ptr_dma_info->ptr_dma_dev, size, + ptr_virt_addr, phy_addr); + return (NULL); + } + ptr_node_data->phy_addr = (NPS_ADDR_T)phy_addr; + ptr_node_data->ptr_virt_addr = ptr_virt_addr; + ptr_node_data->size = size; + + rc = osal_mdc_list_insertToHead((OSAL_MDC_LIST_T *)ptr_dma_info->ptr_dma_list, ptr_node_data); + if (NPS_E_OK != rc) + { + kfree(ptr_node_data); + dma_free_coherent(ptr_dma_info->ptr_dma_dev, size, + ptr_virt_addr, phy_addr); + ptr_virt_addr = NULL; + } + } + return (ptr_virt_addr); +} + +static NPS_ERROR_NO_T +_osal_mdc_freeSysDmaMem( + OSAL_MDC_DMA_INFO_T *ptr_dma_info, + void *ptr_virt_addr) +{ + OSAL_MDC_LIST_NODE_T *ptr_node = NULL; + OSAL_MDC_DMA_NODE_T *ptr_node_data = NULL; + NPS_ERROR_NO_T rc; + + rc = _osal_mdc_searchDmaVirtAddr((OSAL_MDC_LIST_T *)ptr_dma_info->ptr_dma_list, + ptr_virt_addr, &ptr_node, &ptr_node_data); + if (NPS_E_OK == rc) + { + dma_free_coherent(ptr_dma_info->ptr_dma_dev, ptr_node_data->size, + ptr_virt_addr, ptr_node_data->phy_addr); + + osal_mdc_list_deleteByData((OSAL_MDC_LIST_T *)ptr_dma_info->ptr_dma_list, + ptr_node_data); + kfree(ptr_node_data); + } + return (rc); +} +#endif +#endif /* End of NPS_LINUX_KERNEL_MODE */ + +#endif /* End of NPS_EN_DMA_RESERVED */ + +#if defined(NPS_LINUX_KERNEL_MODE) + +void * +osal_mdc_allocDmaMem( + const UI32_T size) +{ + void *ptr_virt_addr = NULL; + +#if defined(NPS_LAMP) + ptr_virt_addr = osal_alloc(size); +#else + OSAL_MDC_DMA_INFO_T *ptr_dma_info = &_osal_mdc_cb.dma_info; + osal_takeSemaphore(&ptr_dma_info->sema, NPS_SEMAPHORE_WAIT_FOREVER); + +#if defined(NPS_EN_DMA_RESERVED) + ptr_virt_addr = _osal_mdc_allocRsrvDmaMem(ptr_dma_info, size); +#else + ptr_virt_addr = _osal_mdc_allocSysDmaMem(ptr_dma_info, size); +#endif + + osal_giveSemaphore(&ptr_dma_info->sema); +#endif + + return ptr_virt_addr; +} + +NPS_ERROR_NO_T +osal_mdc_freeDmaMem( + void *ptr_virt_addr) +{ + NPS_ERROR_NO_T rc = NPS_E_OK; + +#if defined(NPS_LAMP) + osal_free(ptr_virt_addr); +#else + OSAL_MDC_DMA_INFO_T *ptr_dma_info = &_osal_mdc_cb.dma_info; + + osal_takeSemaphore(&ptr_dma_info->sema, NPS_SEMAPHORE_WAIT_FOREVER); + +#if defined(NPS_EN_DMA_RESERVED) + rc = _osal_mdc_freeRsrvDmaMem(ptr_dma_info, ptr_virt_addr); +#else + rc = _osal_mdc_freeSysDmaMem(ptr_dma_info, ptr_virt_addr); +#endif + osal_giveSemaphore(&ptr_dma_info->sema); + + if (NPS_E_OK != rc) + { + OSAL_MDC_ERR("free dma mem failed, virt addr=%p\n", ptr_virt_addr); + } +#endif + + return (rc); +} + +NPS_ERROR_NO_T +osal_mdc_convertPhyToVirt( + const NPS_ADDR_T phy_addr, + void **pptr_virt_addr) +{ + NPS_ERROR_NO_T rc =
NPS_E_OK; + +#if defined(NPS_EN_DMA_RESERVED) + OSAL_MDC_DMA_INFO_T *ptr_dma_info = &_osal_mdc_cb.dma_info; + NPS_HUGE_T rsrv_virt_base = (NPS_HUGE_T)ptr_dma_info->ptr_rsrv_virt_addr; + NPS_ADDR_T rsrv_phy_base = ptr_dma_info->rsrv_phy_addr; +#endif + +#if defined(NPS_EN_DMA_RESERVED) + *pptr_virt_addr = (void *)(rsrv_virt_base + (NPS_HUGE_T)(phy_addr - rsrv_phy_base)); +#else + *pptr_virt_addr = NULL; + *pptr_virt_addr = phys_to_virt(phy_addr); + rc = (NULL == *pptr_virt_addr) ? (NPS_E_ENTRY_NOT_FOUND) : (NPS_E_OK); +#endif + +#if defined(AML_EN_CUSTOM_DMA_ADDR) + if (NPS_E_OK != rc) + { + /* Here the user may invoke the API for their private DMA + * address conversion. + */ + } +#endif + return (rc); +} + +NPS_ERROR_NO_T +osal_mdc_convertVirtToPhy( + void *ptr_virt_addr, + NPS_ADDR_T *ptr_phy_addr) +{ + NPS_ERROR_NO_T rc = NPS_E_OK; + +#if defined(NPS_EN_DMA_RESERVED) + OSAL_MDC_DMA_INFO_T *ptr_dma_info = &_osal_mdc_cb.dma_info; + NPS_HUGE_T rsrv_virt_base = (NPS_HUGE_T)ptr_dma_info->ptr_rsrv_virt_addr; + NPS_ADDR_T rsrv_phy_base = ptr_dma_info->rsrv_phy_addr; +#endif + +#if defined(NPS_EN_DMA_RESERVED) + *ptr_phy_addr = (NPS_ADDR_T)((NPS_HUGE_T)rsrv_phy_base + + (NPS_HUGE_T)ptr_virt_addr - rsrv_virt_base); +#else + *ptr_phy_addr = 0x0; + *ptr_phy_addr = virt_to_phys(ptr_virt_addr); + rc = (0x0 == *ptr_phy_addr) ? (NPS_E_ENTRY_NOT_FOUND) : (NPS_E_OK); +#endif + +#if defined(AML_EN_CUSTOM_DMA_ADDR) + if (NPS_E_OK != rc) + { + /* Here the user may invoke the API for their private DMA + * address conversion. + */ + } +#endif + return (rc); +} + +NPS_ERROR_NO_T +osal_mdc_initDmaMem(void) +{ + OSAL_MDC_DMA_INFO_T *ptr_dma_info = &_osal_mdc_cb.dma_info; + NPS_ERROR_NO_T rc = NPS_E_OK; + + rc = osal_createSemaphore(OSAL_MDC_DMA_SEMAPHORE_NAME, + NPS_SEMAPHORE_BINARY, &ptr_dma_info->sema); + if (NPS_E_OK == rc) + { +#if defined(NPS_EN_DMA_RESERVED) + rc = _osal_mdc_initRsrvDmaMem(ptr_dma_info); + if (NPS_E_OK == rc) + { + rc = _osal_mdc_createRsrvDmaNodeList(ptr_dma_info); + } +#else + rc = _osal_mdc_createSysDmaNodeList(ptr_dma_info); +#endif + } + return (rc); +} + +NPS_ERROR_NO_T +osal_mdc_deinitDmaMem(void) +{ + OSAL_MDC_DMA_INFO_T *ptr_dma_info = &_osal_mdc_cb.dma_info; + + /* Common function for both reserved/system memory. 
*/ + _osal_mdc_destroyDmaNodeList(ptr_dma_info); + +#if defined(NPS_EN_DMA_RESERVED) + _osal_mdc_deinitRsrvDmaMem(ptr_dma_info); +#endif + + osal_destroySemaphore(&ptr_dma_info->sema); + + return (NPS_E_OK); +} + +NPS_ERROR_NO_T +osal_mdc_flushCache( + void *ptr_virt_addr, + const UI32_T size) +{ +#if defined(CONFIG_NOT_COHERENT_CACHE) || defined(CONFIG_DMA_NONCOHERENT) + +#if defined(dma_cache_wback_inv) + dma_cache_wback_inv((NPS_HUGE_T)ptr_virt_addr, size); +#else + dma_cache_sync(NULL, ptr_virt_addr, size, DMA_TO_DEVICE); +#endif + +#endif + return (NPS_E_OK); +} + +NPS_ERROR_NO_T +osal_mdc_invalidateCache( + void *ptr_virt_addr, + const UI32_T size) +{ +#if defined(CONFIG_NOT_COHERENT_CACHE) || defined(CONFIG_DMA_NONCOHERENT) + +#if defined(dma_cache_wback_inv) + dma_cache_wback_inv((NPS_HUGE_T)ptr_virt_addr, size); +#else + dma_cache_sync(NULL, ptr_virt_addr, size, DMA_FROM_DEVICE); +#endif + +#endif + return (NPS_E_OK); +} + +#endif /* End of NPS_LINUX_KERNEL_MODE */ + +/* --------------------------------------------------------------------------- Interrupt */ +#if defined(NPS_LINUX_USER_MODE) +static UI32_T _osal_mdc_isr_init_bitmap = 0; /* To record the dev request_irq */ +static UI32_T _osal_mdc_isr_dev_bitmap; /* To record the dev bitmap */ +static spinlock_t _osal_mdc_isr_dev_bitmap_lock; +static wait_queue_head_t _osal_mdc_isr_wait; +static UI32_T _osal_mdc_isr_mask_addr; +static UI32_T _osal_mdc_isr_mask_val; + + +static inline NPS_ERROR_NO_T +_osal_mdc_initInterrupt(void) +{ + /* init top and bottom halves */ + init_waitqueue_head(&_osal_mdc_isr_wait); + + /* init lock and clear device bitmap */ + spin_lock_init(&_osal_mdc_isr_dev_bitmap_lock); + _osal_mdc_isr_dev_bitmap = 0; + _osal_mdc_isr_init_bitmap = 0; + + /* clear chip interrupt mask address and value */ + _osal_mdc_isr_mask_addr = 0; + _osal_mdc_isr_mask_val = 0; + + return (NPS_E_OK); +} + +/* top half */ +static inline NPS_ERROR_NO_T +_osal_mdc_notifyUserProcess( + const UI32_T unit) +{ + unsigned long flags = 0; + + /* mask chip interrupt */ + osal_mdc_writePciReg(unit, _osal_mdc_isr_mask_addr, + &_osal_mdc_isr_mask_val, sizeof(UI32_T)); + + /* set the device bitmap. */ + spin_lock_irqsave(&_osal_mdc_isr_dev_bitmap_lock, flags); + _osal_mdc_isr_dev_bitmap |= (1 << unit); + spin_unlock_irqrestore(&_osal_mdc_isr_dev_bitmap_lock, flags); + + /* notify user process. */ + wake_up_interruptible(&_osal_mdc_isr_wait); + + return (NPS_E_OK); +} + +static inline NPS_ERROR_NO_T +_osal_mdc_waitEvent( + UI32_T *ptr_dev_bitmap) +{ + unsigned long flags = 0; + + wait_event_interruptible(_osal_mdc_isr_wait, (0 != _osal_mdc_isr_dev_bitmap)); + + /* save and clear the device bitmap. */ + spin_lock_irqsave(&_osal_mdc_isr_dev_bitmap_lock, flags); + *ptr_dev_bitmap = _osal_mdc_isr_dev_bitmap; + _osal_mdc_isr_dev_bitmap = 0; + spin_unlock_irqrestore(&_osal_mdc_isr_dev_bitmap_lock, flags); + + return (NPS_E_OK); +} + +static ssize_t +_osal_mdc_read( + struct file *filep, + char __user *buf, + size_t count, + loff_t *ppos) +{ + ssize_t ret; + UI32_T dev_bitmap = 0; + + if (count != sizeof(UI32_T)) + { + return -EINVAL; + } + + /* check if request_irq is inited. */ + if (0 != _osal_mdc_isr_init_bitmap) + { + _osal_mdc_waitEvent(&dev_bitmap); + } + + /* copy the device bitmap to user process. 
*/ + if (0 != copy_to_user(buf, &dev_bitmap, count)) + { + ret = -EFAULT; + } + else + { + ret = count; + } + + return ret; +} +#endif /* End of NPS_LINUX_USER_MODE */ + +static irqreturn_t +_osal_mdc_systemIntrCallback( + int irq, + void *ptr_cookie) +{ + NPS_ERROR_NO_T rc = NPS_E_OK; + int linux_rc = IRQ_HANDLED; + OSAL_MDC_DEV_T *ptr_dev = (OSAL_MDC_DEV_T *)ptr_cookie; + + /* Invoke the kernel callback; the callback exists only in the following cases: + * 1. SDK built in kernel mode + * 2. SDK built in user mode with the NetIF kernel module enabled + */ + if (NULL != ptr_dev->isr_callback) + { + rc = ptr_dev->isr_callback(ptr_dev->ptr_isr_data); + if (NPS_E_OK != rc) + { + OSAL_MDC_ERR("handle irq failed, rc=%d\n", rc); + linux_rc = IRQ_NONE; + } + } + +#if defined(NPS_LINUX_USER_MODE) + /* Notify user process */ + rc = _osal_mdc_notifyUserProcess(ptr_dev->unit); + if (NPS_E_OK != rc) + { + OSAL_MDC_ERR("notify intr to usr failed, rc=%d\n", rc); + linux_rc = IRQ_NONE; + } +#endif + + return (linux_rc); +} + +NPS_ERROR_NO_T +osal_mdc_registerIsr( + const UI32_T unit, + AML_DEV_ISR_FUNC_T handler, + void *ptr_cookie) +{ + OSAL_MDC_DEV_T *ptr_dev = &_osal_mdc_cb.dev[unit]; + + ptr_dev->isr_callback = handler; + ptr_dev->ptr_isr_data = (void *)((NPS_HUGE_T)unit); + + return (NPS_E_OK); +} + +NPS_ERROR_NO_T +osal_mdc_connectIsr( + const UI32_T unit, + AML_DEV_ISR_FUNC_T handler, + AML_DEV_ISR_DATA_T *ptr_cookie) +{ + OSAL_MDC_DEV_T *ptr_dev = &_osal_mdc_cb.dev[unit]; + int linux_rc = 0; + NPS_ERROR_NO_T rc = NPS_E_OK; + +#if defined(NPS_LINUX_USER_MODE) + if (NULL != ptr_cookie) + { + _osal_mdc_isr_mask_addr = ptr_cookie->mask_addr; + _osal_mdc_isr_mask_val = ptr_cookie->mask_val; + } +#endif + + if (NULL == ptr_dev->isr_callback) + { +#if defined(NPS_LINUX_KERNEL_MODE) + /* In user mode, this data is created in user space instead. */ + ptr_dev->isr_callback = handler; + ptr_dev->ptr_isr_data = (void *)((NPS_HUGE_T)unit); +#endif + +#if defined(OSAL_MDC_EN_MSI) + /* If the "no_msi" flag is set, the device doesn't support MSI. */ + if (1 != ptr_dev->ptr_pci_dev->no_msi) + { + linux_rc = pci_enable_msi(ptr_dev->ptr_pci_dev); + if (0 == linux_rc) + { + /* The system gives a new irq number if MSI is enabled successfully. */ + ptr_dev->irq = ptr_dev->ptr_pci_dev->irq; + } + else + { + OSAL_MDC_ERR("pci_enable_msi() failed, rc=%d\n", linux_rc); + rc = NPS_E_OTHERS; + } + } +#endif + + linux_rc = request_irq(ptr_dev->irq, _osal_mdc_systemIntrCallback, + 0, OSAL_MDC_DRIVER_NAME, (void *)ptr_dev); + + if (0 != linux_rc) + { + OSAL_MDC_ERR("request_irq() failed, rc=%d\n", linux_rc); + rc = NPS_E_OTHERS; + } + } + else + { + OSAL_MDC_ERR("ISR is already connected, double request\n"); + rc = NPS_E_OTHERS; + } + return (rc); +} + +NPS_ERROR_NO_T +osal_mdc_disconnectIsr( + const UI32_T unit) +{ + OSAL_MDC_DEV_T *ptr_dev = &_osal_mdc_cb.dev[unit]; + + free_irq(ptr_dev->irq, (void *)ptr_dev); + +#if defined(OSAL_MDC_EN_MSI) + /* Must free the irq before disabling MSI */ + pci_disable_msi(ptr_dev->ptr_pci_dev); +#endif + +#if defined(NPS_LINUX_KERNEL_MODE) + ptr_dev->isr_callback = NULL; + ptr_dev->ptr_isr_data = NULL; +#endif + +#if defined(NPS_LINUX_USER_MODE) + _osal_mdc_isr_mask_addr = 0x0; + _osal_mdc_isr_mask_val = 0x0; +#endif + + return (NPS_E_OK); +} + +NPS_ERROR_NO_T +osal_mdc_initDevice( + AML_DEV_T *ptr_dev_list, + UI32_T *ptr_dev_num) +{ + OSAL_MDC_CB_T *ptr_cb = &_osal_mdc_cb; + NPS_ERROR_NO_T rc = NPS_E_OK; + + _ptr_osal_mdc_dev = ptr_dev_list; + + memset(ptr_cb, 0x0, sizeof(OSAL_MDC_CB_T)); + +#if defined(AML_EN_I2C) + rc = _osal_mdc_probeI2cDevice(); + *ptr_dev_num = 1; +#else + rc = _osal_mdc_probePciDevice(); + *ptr_dev_num = ptr_cb->dev_num; + + _ptr_ext_pci_dev = _osal_mdc_cb.dev[0].ptr_pci_dev; +#endif /* End of AML_EN_I2C */ + + return (rc); +} + +NPS_ERROR_NO_T +osal_mdc_deinitDevice(void) +{ + NPS_ERROR_NO_T rc = NPS_E_OK; + +#if defined(AML_EN_I2C) + rc = _osal_mdc_removeI2cDevice(); +#else + if (NULL != _ptr_ext_pci_dev) + { + rc = _osal_mdc_removePciDevice(); + _ptr_ext_pci_dev = NULL; + } +#endif + + return (rc); +} + +/*****************************************************************************/ +#if defined(NPS_LINUX_USER_MODE) +/* Interface */ +static UI32_T _osal_mdc_devInited = 0; + +/* DMA */ +#if defined(NPS_EN_DMA_RESERVED) +static UI32_T _osal_mdc_rsvDmaInited = 0; +#else +static struct list_head _osal_mdc_sysDmaList[2]; /* To avoid memory corruption across cold boots */ +static UI32_T _osal_mdc_sysCurDmaListIdx = 0; +#endif + +/* IOCTL */ +static OSAL_MDC_IOCTL_CB_T _osal_mdc_ioctl_cb; +static AML_DEV_T _osal_mdc_ioctl_dev[NPS_CFG_MAXIMUM_CHIPS_PER_SYSTEM] = {}; + +static int +_osal_mdc_open( + struct inode *ptr_inode, + struct file *ptr_file) +{ + return (0); +} + +static int +_osal_mdc_release( + struct inode *ptr_inode, + struct file *ptr_file) +{ + return (0); +} + +static struct vm_operations_struct _osal_mdc_remap_vm_ops = +{ + .open = NULL, + .close = NULL, +}; + +static int +_osal_mdc_mmap( + struct file *filp, + struct vm_area_struct *vma) +{ + size_t size = vma->vm_end - vma->vm_start; + int linux_rc = 0; + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,48) + pgprot_val(vma->vm_page_prot) |= (_PAGE_NO_CACHE | _PAGE_GUARDED); +#else + UI32_T dev_idx; + OSAL_MDC_DEV_T *ptr_dev; + NPS_ADDR_T phy_addr = vma->vm_pgoff << PAGE_SHIFT; + + /* check mmio base phy addr */ + for (dev_idx = 0, ptr_dev = &_osal_mdc_cb.dev[0]; + dev_idx < _osal_mdc_cb.dev_num; + dev_idx++, ptr_dev++) + { + if ((NULL != ptr_dev->ptr_pci_dev) && + (phy_addr == pci_resource_start(ptr_dev->ptr_pci_dev, OSAL_MDC_PCI_BAR0_OFFSET))) + { + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + break; + } + } +#endif + + vma->vm_flags |= VM_IO; + if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + size, vma->vm_page_prot)) + { + linux_rc = -EAGAIN; + } + vma->vm_ops = &_osal_mdc_remap_vm_ops; + return (linux_rc); +} + +#if defined(NPS_EN_DMA_RESERVED) + +static NPS_ERROR_NO_T +_osal_mdc_ioctl_initRsrvDmaMemCallback( + const UI32_T unit, + void *ptr_data) +{ + OSAL_MDC_DMA_INFO_T *ptr_dma_info = &_osal_mdc_cb.dma_info; + OSAL_MDC_IOCTL_DMA_DATA_T *ptr_ioctl_data = (OSAL_MDC_IOCTL_DMA_DATA_T *)ptr_data; + NPS_ERROR_NO_T rc = NPS_E_OK; + + if (0 == _osal_mdc_rsvDmaInited) + { + rc = _osal_mdc_initRsrvDmaMem(ptr_dma_info); + _osal_mdc_rsvDmaInited = 1; + } + ptr_ioctl_data->rsrv_dma_phy_addr = ptr_dma_info->rsrv_phy_addr; + ptr_ioctl_data->rsrv_dma_size = ptr_dma_info->rsrv_size; + + return (rc); +} + +static NPS_ERROR_NO_T +_osal_mdc_ioctl_deinitRsrvDmaMemCallback( + const UI32_T unit, + void *ptr_data) +{ + NPS_ERROR_NO_T rc = NPS_E_OK; + + rc = _osal_mdc_deinitRsrvDmaMem(&_osal_mdc_cb.dma_info); + _osal_mdc_rsvDmaInited = 0; + + return (rc); +} + +#else + +static NPS_ERROR_NO_T +_osal_mdc_clearSysDmaList( + UI32_T dmaListIdx) +{ + OSAL_MDC_DMA_INFO_T *ptr_dma_info = &_osal_mdc_cb.dma_info; + OSAL_MDC_USER_MODE_DMA_NODE_T *ptr_curr_node_data = NULL; + OSAL_MDC_USER_MODE_DMA_NODE_T *ptr_next_node_data = NULL; + void *ptr_virt_addr; + + list_for_each_entry_safe(ptr_curr_node_data, ptr_next_node_data, + &_osal_mdc_sysDmaList[dmaListIdx], list) + { + list_del(&(ptr_curr_node_data->list)); + ptr_virt_addr = phys_to_virt(ptr_curr_node_data->phy_addr); + dma_free_coherent(ptr_dma_info->ptr_dma_dev, + ptr_curr_node_data->size, ptr_virt_addr, + ptr_curr_node_data->phy_addr); + kfree(ptr_curr_node_data); + } + + return (NPS_E_OK); +} + +static NPS_ERROR_NO_T +_osal_mdc_ioctl_allocSysDmaMemCallback( + const UI32_T unit, + void *ptr_data) +{ + OSAL_MDC_DMA_INFO_T *ptr_dma_info = &_osal_mdc_cb.dma_info; + OSAL_MDC_IOCTL_DMA_DATA_T *ptr_ioctl_data = (OSAL_MDC_IOCTL_DMA_DATA_T *)ptr_data; + OSAL_MDC_USER_MODE_DMA_NODE_T *ptr_node_data = NULL; + +/* Guard against 32-bit and 64-bit data types that are out of sync between the OS and the SDK. */ +#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) /* Bus addressing is 64-bit */\ +&& !defined(NPS_EN_64BIT_ADDR) /* SDK follows HOST with 32-bit addr*/\ +&& (defined(NPS_EN_HOST_32_BIT_LITTLE_ENDIAN) || defined(NPS_EN_HOST_32_BIT_BIG_ENDIAN))/* HOST is 32-bit */ +#error "The DMA address of the OS is 64-bit. Please enable NPS_EN_64BIT_ADDR in the SDK." +#endif + +#if !defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) && defined(NPS_EN_64BIT_ADDR) +#error "The DMA address of the OS is not 64-bit. Please disable NPS_EN_64BIT_ADDR in the SDK." +#endif + + if (NULL == dma_alloc_coherent(ptr_dma_info->ptr_dma_dev, ptr_ioctl_data->size, + (dma_addr_t *)&ptr_ioctl_data->phy_addr, GFP_ATOMIC)) + { + return (NPS_E_NO_MEMORY); + } + + ptr_node_data = kmalloc(sizeof(OSAL_MDC_USER_MODE_DMA_NODE_T), GFP_KERNEL); + if (NULL != ptr_node_data) + { + memset(ptr_node_data, 0, sizeof(OSAL_MDC_USER_MODE_DMA_NODE_T)); + ptr_node_data->phy_addr = ptr_ioctl_data->phy_addr; + ptr_node_data->size = ptr_ioctl_data->size; + list_add(&(ptr_node_data->list), &_osal_mdc_sysDmaList[_osal_mdc_sysCurDmaListIdx]); + } + else + { + dma_free_coherent(ptr_dma_info->ptr_dma_dev, ptr_ioctl_data->size, + phys_to_virt(ptr_ioctl_data->phy_addr), ptr_ioctl_data->phy_addr); + + return (NPS_E_NO_MEMORY); + } + + return (NPS_E_OK); +} + +static NPS_ERROR_NO_T +_osal_mdc_ioctl_freeSysDmaMemCallback( + const UI32_T unit, + void *ptr_data) +{ + OSAL_MDC_DMA_INFO_T *ptr_dma_info = &_osal_mdc_cb.dma_info; + OSAL_MDC_IOCTL_DMA_DATA_T *ptr_ioctl_data = (OSAL_MDC_IOCTL_DMA_DATA_T *)ptr_data; + void *ptr_virt_addr; + + OSAL_MDC_USER_MODE_DMA_NODE_T *ptr_curr_node_data = NULL; + OSAL_MDC_USER_MODE_DMA_NODE_T *ptr_next_node_data = NULL; + + + list_for_each_entry_safe(ptr_curr_node_data, ptr_next_node_data, + &_osal_mdc_sysDmaList[_osal_mdc_sysCurDmaListIdx], list) + { + if (ptr_curr_node_data->phy_addr == ptr_ioctl_data->phy_addr) + { + list_del(&(ptr_curr_node_data->list)); + kfree(ptr_curr_node_data); + break; + } + } + + ptr_virt_addr = phys_to_virt(ptr_ioctl_data->phy_addr); + dma_free_coherent(ptr_dma_info->ptr_dma_dev, ptr_ioctl_data->size, + ptr_virt_addr, ptr_ioctl_data->phy_addr); + + return (NPS_E_OK); +} + +#endif + +static NPS_ERROR_NO_T +_osal_mdc_getPciInfoToIoctlData( + const OSAL_MDC_DEV_T *ptr_dev_list, + OSAL_MDC_IOCTL_DEV_DATA_T *ptr_dev_data) +{ + UI32_T idx; + + /* Search for PCIe device and get the MMIO base address. */ + for (idx = 0; idx < _osal_mdc_cb.dev_num; idx++) + { + if (NULL != ptr_dev_list[idx].ptr_pci_dev) + { + ptr_dev_data->pci_mmio_phy_start[idx] = + pci_resource_start(ptr_dev_list[idx].ptr_pci_dev, OSAL_MDC_PCI_BAR0_OFFSET); + + ptr_dev_data->pci_mmio_size[idx] = + pci_resource_len(ptr_dev_list[idx].ptr_pci_dev, OSAL_MDC_PCI_BAR0_OFFSET); + } + } + return (NPS_E_OK); +} + +static NPS_ERROR_NO_T +_osal_mdc_getDeviceIdToIoctlData( + AML_DEV_T *ptr_dev, + OSAL_MDC_IOCTL_DEV_DATA_T *ptr_dev_data, + const UI32_T dev_num) +{ + UI32_T idx; + + for (idx = 0; idx < ptr_dev_data->dev_num; idx++) + { + ptr_dev_data->id[idx].device = ptr_dev[idx].id.device; + ptr_dev_data->id[idx].vendor = ptr_dev[idx].id.vendor; + ptr_dev_data->id[idx].revision = ptr_dev[idx].id.revision; + } + return (NPS_E_OK); +} + +static NPS_ERROR_NO_T +_osal_mdc_ioctl_initDeviceCallback( + const UI32_T unit, + void *ptr_data) +{ + OSAL_MDC_CB_T *ptr_cb = &_osal_mdc_cb; + OSAL_MDC_DEV_T *ptr_dev_list = _osal_mdc_cb.dev; + OSAL_MDC_IOCTL_DEV_DATA_T *ptr_ioctl_data = (OSAL_MDC_IOCTL_DEV_DATA_T *)ptr_data; + + /* "dev" is created just for invoking osal_mdc_initDevice; + * it is of no use once the device IDs have been copied to ptr_ioctl_data. + */ + + NPS_ERROR_NO_T rc = NPS_E_OK; + + if (0 == _osal_mdc_devInited) + { + rc = osal_mdc_initDevice(_osal_mdc_ioctl_dev, &ptr_ioctl_data->dev_num); + } + else + { + /* ptr_cb->dev_num was initialized in osal_mdc_initDevice(); */ + ptr_ioctl_data->dev_num = ptr_cb->dev_num; + } + + if (ptr_cb->dev_num >= NPS_CFG_MAXIMUM_CHIPS_PER_SYSTEM) + { + OSAL_MDC_ERR("dev num=%d >= max support num=%d\n", + ptr_ioctl_data->dev_num, NPS_CFG_MAXIMUM_CHIPS_PER_SYSTEM); + } + +#if !defined(NPS_EN_DMA_RESERVED) + if (0 == _osal_mdc_devInited) + { + /* Create two DMA memory lists and use the 1st one. */ + INIT_LIST_HEAD(&_osal_mdc_sysDmaList[0]); + INIT_LIST_HEAD(&_osal_mdc_sysDmaList[1]); + _osal_mdc_sysCurDmaListIdx = 0; + } + else + { + /* Delay freeing the old list until the chip is reset. + * When we kill the process, the chip continues to write to the DMA memory. + * If we free the old DMA memory before stopping the chip, there could be memory corruption. + */ + _osal_mdc_sysCurDmaListIdx = ((_osal_mdc_sysCurDmaListIdx + 1) & 0x1); + rc = _osal_mdc_clearSysDmaList(_osal_mdc_sysCurDmaListIdx); + } +#endif + + if (NPS_E_OK == rc) + { + rc = _osal_mdc_getDeviceIdToIoctlData(_osal_mdc_ioctl_dev, ptr_ioctl_data, ptr_ioctl_data->dev_num); + } + if (NPS_E_OK == rc) + { + rc = _osal_mdc_getPciInfoToIoctlData(ptr_dev_list, ptr_ioctl_data); + } + + _osal_mdc_devInited = 1; + return (rc); +} + +static NPS_ERROR_NO_T +_osal_mdc_ioctl_deinitDeviceCallback( + const UI32_T unit, + void *ptr_data) +{ + NPS_ERROR_NO_T rc = NPS_E_OK; + +#if !defined(NPS_EN_DMA_RESERVED) + _osal_mdc_clearSysDmaList(0); + _osal_mdc_clearSysDmaList(1); +#endif + + if (0 != _osal_mdc_devInited) + { + rc = osal_mdc_deinitDevice(); + _osal_mdc_devInited = 0; + } + + return (rc); +} + +static NPS_ERROR_NO_T +_osal_mdc_ioctl_connectIsrCallback( + const UI32_T unit, + void *ptr_data) +{ + NPS_ERROR_NO_T rc = NPS_E_OK; + + if (0 == (_osal_mdc_isr_init_bitmap & (1 << unit))) + { + rc = osal_mdc_connectIsr(unit, NULL, ptr_data); + if (NPS_E_OK == rc) + { + _osal_mdc_isr_init_bitmap |= (1 << unit); + } + } + return (rc); +} + +static NPS_ERROR_NO_T +_osal_mdc_ioctl_disconnectIsrCallback( + const UI32_T unit, + void *ptr_data) +{ + /* To make the user-space polling task return from read. */ + _osal_mdc_notifyUserProcess(unit); + + osal_mdc_disconnectIsr(unit); + _osal_mdc_isr_init_bitmap &= ~(1 << unit); + + return (NPS_E_OK); +} + +static NPS_ERROR_NO_T +_osal_mdc_registerIoctlCallback( + const OSAL_MDC_IOCTL_TYPE_T type, + const OSAL_MDC_IOCTL_CALLBACK_FUNC_T func) +{ + OSAL_MDC_IOCTL_CB_T *ptr_cb = &_osal_mdc_ioctl_cb; + NPS_ERROR_NO_T rc = NPS_E_OTHERS; + + if (type < OSAL_MDC_IOCTL_TYPE_LAST) + { + if (NULL == ptr_cb->callback[type]) + { + ptr_cb->callback[type] = func; + rc = NPS_E_OK; + } + else + { + OSAL_MDC_ERR("register ioctl callback failed, type=%d already exists\n", type); + } + } + else + { + OSAL_MDC_ERR("register ioctl callback failed, type=%d >= max=%d\n", + type, OSAL_MDC_IOCTL_TYPE_LAST); + } + return (rc); +} + +static NPS_ERROR_NO_T +_osal_mdc_initIoctl(void) +{ + memset(&_osal_mdc_ioctl_cb, 0x0, sizeof(OSAL_MDC_IOCTL_CB_T)); + + _osal_mdc_registerIoctlCallback(OSAL_MDC_IOCTL_TYPE_MDC_INIT_DEV, + _osal_mdc_ioctl_initDeviceCallback); + + _osal_mdc_registerIoctlCallback(OSAL_MDC_IOCTL_TYPE_MDC_DEINIT_DEV, + _osal_mdc_ioctl_deinitDeviceCallback); +#if defined(NPS_EN_DMA_RESERVED) + _osal_mdc_registerIoctlCallback(OSAL_MDC_IOCTL_TYPE_MDC_INIT_RSRV_DMA_MEM, + _osal_mdc_ioctl_initRsrvDmaMemCallback); + + _osal_mdc_registerIoctlCallback(OSAL_MDC_IOCTL_TYPE_MDC_DEINIT_RSRV_DMA_MEM, + _osal_mdc_ioctl_deinitRsrvDmaMemCallback); +#else + _osal_mdc_registerIoctlCallback(OSAL_MDC_IOCTL_TYPE_MDC_ALLOC_SYS_DMA_MEM, + _osal_mdc_ioctl_allocSysDmaMemCallback); + + _osal_mdc_registerIoctlCallback(OSAL_MDC_IOCTL_TYPE_MDC_FREE_SYS_DMA_MEM, + _osal_mdc_ioctl_freeSysDmaMemCallback); +#endif + _osal_mdc_registerIoctlCallback(OSAL_MDC_IOCTL_TYPE_MDC_CONNECT_ISR, + _osal_mdc_ioctl_connectIsrCallback); + + _osal_mdc_registerIoctlCallback(OSAL_MDC_IOCTL_TYPE_MDC_DISCONNECT_ISR, + _osal_mdc_ioctl_disconnectIsrCallback); + return (NPS_E_OK); +} + +static long +_osal_mdc_ioctl( + struct file *filp, + unsigned int cmd, + unsigned long arg) +{ +#define OSAL_MDC_IOCTL_LOCAL_BUF_SIZE (128) + + OSAL_MDC_IOCTL_CB_T *ptr_cb = &_osal_mdc_ioctl_cb; + OSAL_MDC_IOCTL_CMD_T *ptr_cmd = (OSAL_MDC_IOCTL_CMD_T *)&cmd; + UI32_T unit = ptr_cmd->field.unit; + OSAL_MDC_IOCTL_TYPE_T type = ptr_cmd->field.type; + OSAL_MDC_IOCTL_ACCESS_T access = ptr_cmd->field.access; + UI32_T data_size = ptr_cmd->field.size; + UI8_T temp_buf[OSAL_MDC_IOCTL_LOCAL_BUF_SIZE]; + UI8_T *ptr_temp_buf; + int linux_rc = 0; + + if ((type < OSAL_MDC_IOCTL_TYPE_LAST) && (NULL != ptr_cb->callback[type])) + { + if (data_size > OSAL_MDC_IOCTL_LOCAL_BUF_SIZE) + { + ptr_temp_buf = kmalloc(data_size, GFP_KERNEL); + if (NULL == ptr_temp_buf) + { + return (-ENOMEM); + } + } + else + { + ptr_temp_buf = temp_buf; + } + + /*************************************************************/ + if (OSAL_MDC_IOCTL_ACCESS_WRITE == access) + { + /* type: FREE_SYS_DMA_MEM : DMA physical address + * CONNECT_ISR : Chip interrupt mask address and value + */ + if (copy_from_user(ptr_temp_buf, (int __user *)arg, data_size)) + { + linux_rc = -EFAULT; + } + else + { + if (NPS_E_OK != ptr_cb->callback[type](unit, (void *)ptr_temp_buf)) + { + linux_rc = -EFAULT; + } + } + } + else if (OSAL_MDC_IOCTL_ACCESS_READ == access) + { + /* type: INIT_DEV : PCIe device and vendor ID, mmio address and size + * INIT_RSRV_DMA_MEM : Reserved DMA physical address and size + */ + if (NPS_E_OK != ptr_cb->callback[type](unit, (void *)ptr_temp_buf)) + { + linux_rc = -EFAULT; + } + else + { + if (copy_to_user((int __user *)arg, ptr_temp_buf, data_size)) + { + linux_rc = -EFAULT; + } + } + } + else if (OSAL_MDC_IOCTL_ACCESS_READ_WRITE == access) + { + /* type: ALLOC_SYS_DMA_MEM : DMA
physical address + */ + if (copy_from_user(ptr_temp_buf, (int __user *)arg, data_size)) + { + linux_rc = -EFAULT; + } + else + { + if (NPS_E_OK != ptr_cb->callback[type](unit, (void *)ptr_temp_buf)) + { + linux_rc = -EFAULT; + } + else + { + if (copy_to_user((int __user *)arg, ptr_temp_buf, data_size)) + { + linux_rc = -EFAULT; + } + } + } + } + else if (OSAL_MDC_IOCTL_ACCESS_NONE == access) + { + /* type: DEINIT_DEV + * DEINIT_RSRV_DMA_MEM + * DISCONNECT_ISR + */ + if (NPS_E_OK != ptr_cb->callback[type](unit, (void *)ptr_temp_buf)) + { + linux_rc = -EFAULT; + } + } + /*************************************************************/ + + if (data_size > OSAL_MDC_IOCTL_LOCAL_BUF_SIZE) + { + kfree(ptr_temp_buf); + } + } + else + { + OSAL_MDC_ERR("invalid ioctl, cmd=%u, arg=%lu, type=%d\n", cmd, arg, type); + } + return (linux_rc); +} + +#ifdef CONFIG_COMPAT +static long +_osal_mdc_compat_ioctl( + struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + return _osal_mdc_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); +} +#endif + +static struct file_operations _osal_mdc_fops = +{ + .owner = THIS_MODULE, + .open = _osal_mdc_open, + .read = _osal_mdc_read, + .release = _osal_mdc_release, + .unlocked_ioctl = _osal_mdc_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = _osal_mdc_compat_ioctl, +#endif + .mmap = _osal_mdc_mmap, +}; + +static struct miscdevice _osal_mdc_misc = +{ + .minor = OSAL_MDC_DRIVER_MISC_MINOR_NUM, + .name = OSAL_MDC_DRIVER_NAME, + .fops = & _osal_mdc_fops, +}; + +static int __init +osal_mdc_module_init(void) +{ + int linux_rc; + + _osal_mdc_initIoctl(); /* To register IOCTL callback functions. */ + _osal_mdc_initInterrupt(); /* To init structs for top and bottom halves */ + + linux_rc = misc_register(&_osal_mdc_misc); + if (0 != linux_rc) + { + OSAL_MDC_ERR("register dev %s failed, linux_rc=%d\n", OSAL_MDC_DRIVER_NAME, linux_rc); + } + return (linux_rc); +} + +static void __exit +osal_mdc_module_exit(void) +{ + int unit = 0; + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(4,2,8) + int linux_rc; + + linux_rc = misc_deregister(&_osal_mdc_misc); + if (0 != linux_rc) + { + OSAL_MDC_ERR("de-register dev %s failed, linux_rc=%d\n", OSAL_MDC_DRIVER_NAME, linux_rc); + } +#else + misc_deregister(&_osal_mdc_misc); +#endif + + /* ref: _osal_mdc_ioctl_disconnectIsrCallback */ + for (unit = 0; unit < NPS_CFG_MAXIMUM_CHIPS_PER_SYSTEM; unit++) + { + if (0 != (_osal_mdc_isr_init_bitmap & (1 << unit))) + { + osal_mdc_disconnectIsr(unit); + _osal_mdc_isr_init_bitmap &= ~(1 << unit); + } + } + + /* ref: _osal_mdc_ioctl_deinitRsrvDmaMemCallback */ +#if defined(NPS_EN_DMA_RESERVED) + if (1 == _osal_mdc_rsvDmaInited) + { + _osal_mdc_deinitRsrvDmaMem(&_osal_mdc_cb.dma_info); + _osal_mdc_rsvDmaInited = 0; + } +#endif + + /* ref: _osal_mdc_ioctl_deinitDeviceCallback */ + if (1 == _osal_mdc_devInited) + { +#if !defined(NPS_EN_DMA_RESERVED) + _osal_mdc_clearSysDmaList(0); + _osal_mdc_clearSysDmaList(1); +#endif + osal_mdc_deinitDevice(); + _osal_mdc_devInited = 0; + } +} + +#else + +static int __init +osal_mdc_module_init(void) +{ + return (0); +} + +static void __exit +osal_mdc_module_exit(void) +{ + +} + +#endif /* End of NPS_LINUX_USER_MODE */ + +module_init(osal_mdc_module_init); +module_exit(osal_mdc_module_exit); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Nephos"); +MODULE_DESCRIPTION("SDK Kernel Module"); diff --git a/platform/nephos/one-image.mk b/platform/nephos/one-image.mk index d29d0e8c9350..27bb09b46dba 100644 --- a/platform/nephos/one-image.mk +++ b/platform/nephos/one-image.mk @@ -3,7 +3,7 
@@ SONIC_ONE_IMAGE = sonic-nephos.bin $(SONIC_ONE_IMAGE)_MACHINE = nephos $(SONIC_ONE_IMAGE)_IMAGE_TYPE = onie -$(SONIC_ONE_IMAGE)_INSTALLS += $(NEPHOS_NPS_KERNEL) +$(SONIC_ONE_IMAGE)_INSTALLS += $(NEPHOS_MODULE) $(SONIC_ONE_IMAGE)_LAZY_INSTALLS += $(INGRASYS_S9130_32X_PLATFORM_MODULE) \ $(INGRASYS_S9230_64X_PLATFORM_MODULE) \ $(ACCTON_AS7116_54X_PLATFORM_MODULE) \ diff --git a/platform/nephos/rules.mk b/platform/nephos/rules.mk index 2e2ee702fb5b..f7781c2505b1 100644 --- a/platform/nephos/rules.mk +++ b/platform/nephos/rules.mk @@ -1,5 +1,5 @@ -include $(PLATFORM_PATH)/sdk.mk include $(PLATFORM_PATH)/sai.mk +include $(PLATFORM_PATH)/nephos-modules.mk include $(PLATFORM_PATH)/platform-modules-ingrasys.mk include $(PLATFORM_PATH)/platform-modules-accton.mk include $(PLATFORM_PATH)/platform-modules-cig.mk diff --git a/platform/nephos/sai.mk b/platform/nephos/sai.mk index 4bfa27999259..7ae2a9408c94 100644 --- a/platform/nephos/sai.mk +++ b/platform/nephos/sai.mk @@ -1,6 +1,6 @@ -SDK_VERSION = 2.0.5 -SAI_VERSION = 1.3.5 -SAI_COMMIT_ID = 575f90 +SDK_VERSION = 2.0.7 +SAI_VERSION = 1.3.6 +SAI_COMMIT_ID = ed5e9a NEPHOS_SAI = libsainps_$(SDK_VERSION)_sai_$(SAI_VERSION)_$(SAI_COMMIT_ID)_amd64.deb $(NEPHOS_SAI)_URL = "https://github.com/NephosInc/SONiC/raw/master/sai/libsainps_$(SDK_VERSION)_sai_$(SAI_VERSION)_$(SAI_COMMIT_ID)_amd64.deb" diff --git a/platform/nephos/sdk.mk b/platform/nephos/sdk.mk deleted file mode 100644 index 2cf92d12c12f..000000000000 --- a/platform/nephos/sdk.mk +++ /dev/null @@ -1,7 +0,0 @@ -SDK_VERSION = 2.0.5 -LINUX_VER = 4.9.0-9 -SDK_COMMIT_ID = f2e56f -NEPHOS_NPS_KERNEL = nps-modules-$(LINUX_VER)_$(SDK_VERSION)_$(SDK_COMMIT_ID)_amd64.deb -$(NEPHOS_NPS_KERNEL)_URL = "https://github.com/NephosInc/SONiC/raw/master/sdk/nps-modules-$(LINUX_VER)_$(SDK_VERSION)_$(SDK_COMMIT_ID)_amd64.deb" - -SONIC_ONLINE_DEBS += $(NEPHOS_NPS_KERNEL)
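Reviewer note: the reserved-DMA path in osal_mdc.c above (_osal_mdc_allocRsrvDmaMem/_osal_mdc_freeRsrvDmaMem) is a first-fit allocator over one contiguous window: a free node is split on allocation and free nodes are coalesced with their neighbours on release. Below is a minimal standalone sketch of that split/merge bookkeeping in plain user-space C. All names (node_t, alloc_chunk, free_chunk) are invented for the sketch, and it tracks a byte offset rather than the driver's virtual/physical address pair; it is an illustration, not SDK code.

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>

typedef struct node_s
{
    size_t offset;            /* offset into the reserved window */
    size_t size;              /* length of this chunk */
    bool   available;         /* free or handed out */
    struct node_s *prev;
    struct node_s *next;
} node_t;

static node_t *head;          /* one node initially covers the whole window */

static void init_region(size_t total)
{
    head = calloc(1, sizeof(node_t));
    head->size = total;
    head->available = true;
}

/* First-fit: exact fit is handed out as-is, larger nodes are split. */
static node_t *alloc_chunk(size_t size)
{
    for (node_t *n = head; n != NULL; n = n->next)
    {
        if (!n->available || n->size < size)
        {
            continue;
        }
        if (n->size == size)
        {
            n->available = false;
            return n;
        }
        /* Split: the new node takes the front, the original shrinks. */
        node_t *fit = calloc(1, sizeof(node_t));
        fit->offset = n->offset;
        fit->size = size;
        fit->available = false;
        n->offset += size;
        n->size -= size;
        fit->prev = n->prev;
        fit->next = n;
        if (n->prev) { n->prev->next = fit; } else { head = fit; }
        n->prev = fit;
        return fit;
    }
    return NULL;
}

/* Free: mark available, then merge with free neighbours. */
static void free_chunk(node_t *n)
{
    n->available = true;
    if (n->prev && n->prev->available)    /* absorb into previous node */
    {
        node_t *p = n->prev;
        p->size += n->size;
        p->next = n->next;
        if (n->next) { n->next->prev = p; }
        free(n);
        n = p;
    }
    if (n->next && n->next->available)    /* absorb the next node */
    {
        node_t *s = n->next;
        n->size += s->size;
        n->next = s->next;
        if (s->next) { s->next->prev = n; }
        free(s);
    }
}

int main(void)
{
    init_region(1024);
    node_t *a = alloc_chunk(256);
    node_t *b = alloc_chunk(128);
    free_chunk(a);                        /* leaves a hole at offset 0 */
    free_chunk(b);                        /* coalesces back into one free node */
    assert(head->size == 1024 && head->available);
    return 0;
}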
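Because the reserved window is one contiguous ioremap'd region, osal_mdc_convertPhyToVirt()/osal_mdc_convertVirtToPhy() reduce to adding or subtracting a constant offset between the physical base and the mapped virtual base. The short self-contained sketch below demonstrates the arithmetic; the malloc'd buffer stands in for the mapped window and the physical base value is made up for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
    /* Pretend this malloc'd block is the ioremap'd reserved window. */
    unsigned char *window = malloc(4096);
    uintptr_t rsrv_virt_base = (uintptr_t)window;
    uint64_t  rsrv_phy_base  = 0x80000000u;   /* hypothetical physical base */

    /* phy -> virt, as in osal_mdc_convertPhyToVirt() */
    uint64_t phy = rsrv_phy_base + 0x100;
    void *virt = (void *)(rsrv_virt_base + (uintptr_t)(phy - rsrv_phy_base));

    /* virt -> phy, as in osal_mdc_convertVirtToPhy() */
    uint64_t phy_again = rsrv_phy_base + (uint64_t)((uintptr_t)virt - rsrv_virt_base);

    assert(phy_again == phy);                 /* the two conversions are inverses */
    free(window);
    return 0;
}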
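Finally, in user mode the _osal_mdc_read() handler above turns the character device into an interrupt notification channel: a 4-byte read blocks in wait_event_interruptible() until an IRQ fires, then returns a bitmap with one bit per interrupting unit (the chip's interrupt is masked before the wakeup). A user-space polling thread might consume it roughly as sketched below; the device path is a placeholder, since the real node name comes from OSAL_MDC_DRIVER_NAME, which is not shown in this hunk.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/dev/nps_dev", O_RDONLY);   /* placeholder path */
    if (fd < 0)
    {
        perror("open");
        return 1;
    }
    for (;;)
    {
        uint32_t dev_bitmap = 0;
        /* Blocks until the driver wakes us with a non-zero bitmap. */
        if (read(fd, &dev_bitmap, sizeof(dev_bitmap)) != sizeof(dev_bitmap))
        {
            break;
        }
        for (int unit = 0; unit < 32; unit++)
        {
            if (dev_bitmap & (1u << unit))
            {
                printf("interrupt on unit %d\n", unit);
                /* ...dispatch, then unmask the chip interrupt again... */
            }
        }
    }
    close(fd);
    return 0;
}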