Skip to content

Commit

Permalink
lcore: add LcoreMgr tool to manage the legacy lcore manager status (#577
Browse files Browse the repository at this point in the history
)

usage:
  --info: Print lcore shared manager detail info
  --clean_pid_auto_check: Clean the dead entries if PID is not active
  --clean_lcore <lcore id>: Clean the entry by lcore ID

Signed-off-by: Frank Du <frank.du@intel.com>
  • Loading branch information
frankdjx authored Nov 14, 2023
1 parent 425e7db commit 4f46e49
Show file tree
Hide file tree
Showing 12 changed files with 464 additions and 81 deletions.
8 changes: 8 additions & 0 deletions app/meson.build
Original file line number Diff line number Diff line change
Expand Up @@ -111,6 +111,14 @@ executable('ConvApp', conv_sources,
dependencies: [asan_dep, mtl]
)

# Legacy lcore manager tool (LcoreMgr); usage is documented in doc/lcore.md
executable('LcoreMgr', lcore_mgr_sources,
c_args : app_c_args,
link_args: app_ld_args,
# asan should be always the first dep
dependencies: [asan_dep, mtl]
)

# Performance benchmarks for color convert
executable('PerfRfc4175422be10ToP10Le', perf_rfc4175_422be10_to_p10le_sources,
c_args : app_c_args,
Expand Down
94 changes: 94 additions & 0 deletions app/tools/lcore_shmem_mgr.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,94 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2023 Intel Corporation
*/

#include <errno.h>
#include <getopt.h>
#include <limits.h>
#include <mtl/mtl_lcore_shm_api.h>
#include <stdlib.h>

#include "log.h"

/* Command ids returned by getopt_long_only() for each long option. */
enum lsm_args_cmd {
LSM_ARG_UNKNOWN = 0,
LSM_ARG_HELP = 0x100, /* start above the ASCII range so ids never collide with short options */
LSM_ARG_INFO,
LSM_ARG_CLEAN_PID_AUTO_CHECK,
LSM_ARG_CLEAN_LCORE,
LSM_ARG_MAX,
};

/*
 * Long option table for getopt_long_only(). Fields per getopt(3):
 *   const char *name; int has_arg; int *flag; int val;
 * flag is NULL (0) so getopt returns val (an lsm_args_cmd id) directly.
 * The table must end with an all-zero sentinel entry.
 */
static struct option lsm_args_options[] = {
{"help", no_argument, 0, LSM_ARG_HELP},
{"info", no_argument, 0, LSM_ARG_INFO},
{"clean_pid_auto_check", no_argument, 0, LSM_ARG_CLEAN_PID_AUTO_CHECK},
{"clean_lcore", required_argument, 0, LSM_ARG_CLEAN_LCORE},
{0, 0, 0, 0},
};

/* Print the command line usage of this tool to stdout. */
static void lsm_print_help(void) { /* (void): empty () is an obsolescent C definition form */
printf("\n");
printf("##### Usage: #####\n\n");

printf("Params:\n");
printf(" --help: Print the help information\n");
printf(" --info: Print lcore shared manager detail info\n");
printf(" --clean_pid_auto_check: Clean the dead entries if PID is not active\n");
printf(" --clean_lcore <lcore id>: Clean the entry by lcore ID\n");

printf("\n");
}

int main(int argc, char** argv) {
int cmd = -1, opt_idx = 0;
int ret;

while (1) {
cmd = getopt_long_only(argc, argv, "hv", lsm_args_options, &opt_idx);
if (cmd == -1) break;

switch (cmd) {
case LSM_ARG_INFO:
mtl_lcore_shm_print();
break;
case LSM_ARG_CLEAN_PID_AUTO_CHECK:
ret = mtl_lcore_shm_clean(MTL_LCORE_CLEAN_PID_AUTO_CHECK, NULL, 0);
if (ret > 0)
info("Total %d dead lcores detected and deleted\n", ret);
else if (ret == 0)
info("No dead lcores detected\n");
else
err("Fail %d to clean shm by auto PID check\n", ret);
break;
case LSM_ARG_CLEAN_LCORE: {
int lcore = atoi(optarg);
if (lcore < 0) {
err("lcore %d is not valid\n", lcore);
return -EIO;
}
struct mtl_lcore_clean_pid_info pid;
pid.lcore = lcore;
ret = mtl_lcore_shm_clean(MTL_LCORE_CLEAN_LCORE, &pid, sizeof(pid));
if (ret >= 0)
info("Succ to delete lcore %d\n", lcore);
else
err("Fail %d to delete lcore %d\n", ret, lcore);
break;
}
case LSM_ARG_HELP:
default:
lsm_print_help();
return -1;
}
}

return 0;
}
2 changes: 2 additions & 0 deletions app/tools/meson.build
Original file line number Diff line number Diff line change
Expand Up @@ -2,3 +2,5 @@
# Copyright 2022 Intel Corporation

conv_sources = files('convert_app.c', 'convert_app_args.c')

lcore_mgr_sources = files('lcore_shmem_mgr.c')
24 changes: 24 additions & 0 deletions doc/lcore.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# Lcore Guide

## 1. Introduction

In DPDK (Data Plane Development Kit), an "lcore" stands for "logical core," and it represents a logical CPU core on a multi-core processor. Binding a thread to a specific logical core (lcore) is a technique used to achieve better control over the execution of packet processing tasks and to optimize the performance of networking applications.

It minimizes the impact of OS scheduler decisions, reduces cache-related issues, and allows for fine-grained control over CPU resources, all of which are critical for meeting the stringent performance requirements of networking workloads. For the same performance reasons, the Intel® Media Transport Library scheduler also uses pinned lcores.

## 2. Lcore manager for multi process

IMTL supports multi-process deployment based on SR-IOV VF isolation. To manage the dispatching of lcores among multiple processes, it introduces shared memory mapping to maintain the status of lcore usage. Each process searches the mapping and allocates a new lcore slot from it, freeing the slot when it is no longer in use.
The last user attached to the shared memory will reset the mapping to an initial status during the `shmdt` operation as part of the release routine.

IMTL also provides a tool, which can be found at `./build/app/LcoreMgr`, for manually cleaning up lcore entries. This tool is typically used when a process fails to release lcores due to panic issues or other failures that prevent the regular free routine from running. Below is the usage information:

```bash
--info: Print lcore shared manager detail info
--clean_pid_auto_check: Clean the dead entries if PID is not active
--clean_lcore <lcore id>: Clean the entry by lcore ID
```
When you use the `--clean_pid_auto_check` option, the tool will perform a loop check for all the active entries in the map. It checks whether the hostname and user match the current login environment and then verifies if the PID is still running. If the tool detects that the PID is no longer active, it will proceed to remove the lcore from the mapping.
If you are deploying in a multi-container environment, the PID check becomes less useful as each container has its own process namespace. In such cases, you can use the `--clean_lcore` option to remove an entry based on the lcore ID. However, it's important to confirm that the lcore is not active before using this option.
2 changes: 1 addition & 1 deletion doc/run.md
Original file line number Diff line number Diff line change
Expand Up @@ -557,7 +557,7 @@ The VFIO driver can run without the IOMMU feature, enable it with below command
sudo bash -c 'echo 1 > /sys/module/vfio/parameters/enable_unsafe_noiommu_mode'
```
### 8.13 Fail to loading shared libraries
### 8.14 Fail to load shared libraries
If you get a message similar to the below when running the RxTxApp, it's likely an ld library path problem.

Expand Down
2 changes: 1 addition & 1 deletion include/meson.build
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
# Copyright 2022 Intel Corporation

mtl_header_files = files('mtl_api.h', 'st_api.h', 'st_convert_api.h', 'st_convert_internal.h', 'st_pipeline_api.h', 'st20_api.h', 'st30_api.h', 'st40_api.h',
'st20_redundant_api.h', 'mudp_api.h', 'mudp_sockfd_api.h', 'mudp_sockfd_internal.h')
'st20_redundant_api.h', 'mudp_api.h', 'mudp_sockfd_api.h', 'mudp_sockfd_internal.h', 'mtl_lcore_shm_api.h')

if is_windows
mtl_header_files += files('mudp_win.h')
Expand Down
65 changes: 65 additions & 0 deletions include/mtl_lcore_shm_api.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2023 Intel Corporation
*/

/**
* @file mtl_lcore_shm_api.h
*
* Interfaces to the legacy lcore shared memory manager.
*
*/

#include "mtl_api.h"

#ifndef _MTL_LCORE_SHM_API_HEAD_H_
#define _MTL_LCORE_SHM_API_HEAD_H_

#if defined(__cplusplus)
extern "C" {
#endif

/** Args struct for the MTL_LCORE_CLEAN_LCORE action */
struct mtl_lcore_clean_pid_info {
/** the lcore id of the entry to clean */
uint32_t lcore;
};

/** lcore clean action */
enum mtl_lcore_clean_action {
/** auto, no args.
 * Remove lcore usage if the PID is inactive under the same hostname and user.
 */
MTL_LCORE_CLEAN_PID_AUTO_CHECK = 0,
/** clean one entry by lcore id, args points to a struct mtl_lcore_clean_pid_info */
MTL_LCORE_CLEAN_LCORE,
/** max value of this enum */
MTL_LCORE_CLEAN_MAX,
};

/**
 * Print out the legacy lcore manager(shared memory) status.
 *
 * @return
 *   - 0 if successful.
 *   - <0: Error code if fail.
 */
int mtl_lcore_shm_print(void);

/**
 * Clean the unused lcore from the legacy lcore manager(shared memory).
 * @param action
 *   The action type.
 * @param args
 *   The args to the action type: a struct mtl_lcore_clean_pid_info for
 *   MTL_LCORE_CLEAN_LCORE, NULL for MTL_LCORE_CLEAN_PID_AUTO_CHECK.
 * @param args_sz
 *   The size of the args.
 *
 * @return
 *   - >=0 if successful. NOTE(review): the LcoreMgr tool treats a positive
 *     return from MTL_LCORE_CLEAN_PID_AUTO_CHECK as the number of entries
 *     cleaned — confirm against the implementation.
 *   - <0: Error code if fail.
 */
int mtl_lcore_shm_clean(enum mtl_lcore_clean_action action, void* args, size_t args_sz);

#if defined(__cplusplus)
}
#endif

#endif
2 changes: 2 additions & 0 deletions lib/src/mt_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -464,6 +464,8 @@ mtl_handle mtl_init(struct mtl_init_params* p) {
impl = mt_rte_zmalloc_socket(sizeof(*impl), socket[MTL_PORT_P]);
if (!impl) goto err_exit;

mt_user_info_init(&impl->u_info);

#ifndef WINDOWSENV
if (geteuid() == 0)
impl->privileged = true;
Expand Down
29 changes: 25 additions & 4 deletions lib/src/mt_main.h
Original file line number Diff line number Diff line change
Expand Up @@ -545,14 +545,18 @@ struct mt_sch_impl {
uint64_t stat_sleep_ns_max;
};

/* Handle to the lcore shared memory manager. */
struct mt_lcore_mgr {
/* mapped lcore status region, shared across processes */
struct mt_lcore_shm* lcore_shm;
/* shm segment id of the mapping */
int lcore_shm_id;
};

/* Scheduler manager: the scheduler instances plus the shared lcore bookkeeping. */
struct mt_sch_mgr {
struct mt_sch_impl sch[MT_MAX_SCH_NUM];
/* active sch cnt */
rte_atomic32_t sch_cnt;
pthread_mutex_t mgr_mutex; /* protect sch mgr */

/* handle to the lcore shared memory manager */
struct mt_lcore_mgr lcore_mgr;
/* NOTE(review): presumably a file-lock fd guarding the lcore shm — confirm */
int lcore_lock_fd;
bool local_lcores_active[RTE_MAX_LCORE]; /* local lcores active map */
};
Expand Down Expand Up @@ -707,9 +711,18 @@ struct mt_interface {
void* xdp;
};

/* One slot per possible lcore in the shared memory mapping; hostname/user/pid
 * identify the owner so dead owners can be detected and cleaned. */
struct mt_lcore_shm_entry {
/* hostname of the owning process's host */
char hostname[64];
/* login user of the owning process */
char user[32];
/* PID of the owning process */
pid_t pid;
/* true while this lcore slot is in use */
bool active;
};

/* Layout of the lcore status shared memory region. */
struct mt_lcore_shm {
/* number of used lcores */
int used;
/* lcores map info */
struct mt_lcore_shm_entry lcores_info[RTE_MAX_LCORE];
};

typedef int (*mt_dma_drop_mbuf_cb)(void* priv, struct rte_mbuf* mbuf);
Expand Down Expand Up @@ -1078,6 +1091,12 @@ struct mt_dp_impl {
rte_spinlock_t txq_sys_entry_lock; /* protect txq_sys_entry */
};

/* Identity of the current process; the same fields a lcore shm entry records,
 * so NOTE(review): presumably used to stamp/match shm entries — confirm. */
struct mt_user_info {
/* hostname of this host */
char hostname[64];
/* login user name */
char user[32];
/* process id */
pid_t pid;
};

struct mtl_main_impl {
struct mt_interface inf[MTL_PORT_MAX];

Expand Down Expand Up @@ -1139,6 +1158,8 @@ struct mtl_main_impl {
/* st plugin dev mgr */
struct st_plugin_mgr plugin_mgr;

struct mt_user_info u_info;

void* mudp_rxq_mgr[MTL_PORT_MAX];

/* cnt for open sessions */
Expand Down
Loading

0 comments on commit 4f46e49

Please sign in to comment.