hw/xen: Expose handle_bufioreq in xen_register_ioreq
Expose handle_bufioreq in xen_register_ioreq().
This is to allow machines to enable or disable buffered ioreqs.

No functional change since all callers still set it to
HVM_IOREQSRV_BUFIOREQ_ATOMIC.

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
edgarigl committed Oct 3, 2024
1 parent abdfd65 commit b2150e4
Showing 5 changed files with 73 additions and 41 deletions.
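For illustration only (not part of this commit): with the new parameter, a machine that does not want a buffered ioreq ring could pass HVM_IOREQSRV_BUFIOREQ_OFF instead of HVM_IOREQSRV_BUFIOREQ_ATOMIC. The call below is a hypothetical sketch; as the commit message notes, both in-tree callers still pass HVM_IOREQSRV_BUFIOREQ_ATOMIC.

    /* Hypothetical machine init: opt out of buffered ioreqs. */
    xen_register_ioreq(state, max_cpus,
                       HVM_IOREQSRV_BUFIOREQ_OFF,
                       &xen_memory_listener);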
hw/i386/xen/xen-hvm.c (3 additions, 1 deletion)

@@ -614,7 +614,9 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
 
     state = g_new0(XenIOState, 1);
 
-    xen_register_ioreq(state, max_cpus, &xen_memory_listener);
+    xen_register_ioreq(state, max_cpus,
+                       HVM_IOREQSRV_BUFIOREQ_ATOMIC,
+                       &xen_memory_listener);
 
     xen_is_stubdomain = xen_check_stubdomain(state->xenstore);
 
hw/xen/xen-hvm-common.c (62 additions, 38 deletions)

@@ -667,6 +667,8 @@ static int xen_map_ioreq_server(XenIOState *state)
     xen_pfn_t ioreq_pfn;
     xen_pfn_t bufioreq_pfn;
     evtchn_port_t bufioreq_evtchn;
+    unsigned long num_frames = 1;
+    unsigned long frame = 1;
     int rc;
 
     /*
@@ -675,59 +677,78 @@
      */
     QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0);
     QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1);
 
+    if (state->has_bufioreq) {
+        frame = 0;
+        num_frames = 2;
+    }
     state->fres = xenforeignmemory_map_resource(xen_fmem, xen_domid,
                                          XENMEM_resource_ioreq_server,
-                                         state->ioservid, 0, 2,
+                                         state->ioservid,
+                                         frame, num_frames,
                                          &addr,
                                          PROT_READ | PROT_WRITE, 0);
     if (state->fres != NULL) {
         trace_xen_map_resource_ioreq(state->ioservid, addr);
-        state->buffered_io_page = addr;
-        state->shared_page = addr + XC_PAGE_SIZE;
+        state->shared_page = addr;
+        if (state->has_bufioreq) {
+            state->buffered_io_page = addr;
+            state->shared_page = addr + XC_PAGE_SIZE;
+        }
     } else if (errno != EOPNOTSUPP) {
         error_report("failed to map ioreq server resources: error %d handle=%p",
                      errno, xen_xc);
         return -1;
     }
 
-    rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
-                                   (state->shared_page == NULL) ?
-                                   &ioreq_pfn : NULL,
-                                   (state->buffered_io_page == NULL) ?
-                                   &bufioreq_pfn : NULL,
-                                   &bufioreq_evtchn);
-    if (rc < 0) {
-        error_report("failed to get ioreq server info: error %d handle=%p",
-                     errno, xen_xc);
-        return rc;
-    }
+    /*
+     * If we fail to map the shared page with xenforeignmemory_map_resource()
+     * or if we're using buffered ioreqs, we need xen_get_ioreq_server_info()
+     * to provide the addresses to map the shared page and/or to get the
+     * event-channel port for buffered ioreqs.
+     */
+    if (state->shared_page == NULL || state->has_bufioreq) {
+        rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
+                                       (state->shared_page == NULL) ?
+                                       &ioreq_pfn : NULL,
+                                       (state->has_bufioreq &&
+                                        state->buffered_io_page == NULL) ?
+                                       &bufioreq_pfn : NULL,
+                                       &bufioreq_evtchn);
+        if (rc < 0) {
+            error_report("failed to get ioreq server info: error %d handle=%p",
+                         errno, xen_xc);
+            return rc;
+        }
 
-    if (state->shared_page == NULL) {
-        trace_xen_map_ioreq_server_shared_page(ioreq_pfn);
+        if (state->shared_page == NULL) {
+            trace_xen_map_ioreq_server_shared_page(ioreq_pfn);
 
-        state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
-                                                  PROT_READ | PROT_WRITE,
-                                                  1, &ioreq_pfn, NULL);
+            state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
+                                                      PROT_READ | PROT_WRITE,
+                                                      1, &ioreq_pfn, NULL);
+        }
         if (state->shared_page == NULL) {
             error_report("map shared IO page returned error %d handle=%p",
                          errno, xen_xc);
         }
-    }
 
-    if (state->buffered_io_page == NULL) {
-        trace_xen_map_ioreq_server_buffered_io_page(bufioreq_pfn);
+        if (state->has_bufioreq && state->buffered_io_page == NULL) {
+            trace_xen_map_ioreq_server_buffered_io_page(bufioreq_pfn);
 
-        state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
-                                                       PROT_READ | PROT_WRITE,
-                                                       1, &bufioreq_pfn,
-                                                       NULL);
-        if (state->buffered_io_page == NULL) {
-            error_report("map buffered IO page returned error %d", errno);
-            return -1;
+            state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
+                                                           PROT_READ | PROT_WRITE,
+                                                           1, &bufioreq_pfn,
+                                                           NULL);
+            if (state->buffered_io_page == NULL) {
+                error_report("map buffered IO page returned error %d", errno);
+                return -1;
+            }
         }
     }
 
-    if (state->shared_page == NULL || state->buffered_io_page == NULL) {
+    if (state->shared_page == NULL ||
+        (state->has_bufioreq && state->buffered_io_page == NULL)) {
        return -1;
    }
 
@@ -830,14 +851,15 @@ static void xen_do_ioreq_register(XenIOState *state,
         state->ioreq_local_port[i] = rc;
     }
 
-    rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid,
-                                          state->bufioreq_remote_port);
-    if (rc == -1) {
-        error_report("buffered evtchn bind error %d", errno);
-        goto err;
+    if (state->has_bufioreq) {
+        rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid,
+                                              state->bufioreq_remote_port);
+        if (rc == -1) {
+            error_report("buffered evtchn bind error %d", errno);
+            goto err;
+        }
+        state->bufioreq_local_port = rc;
     }
-    state->bufioreq_local_port = rc;
 
     /* Init RAM management */
 #ifdef XEN_COMPAT_PHYSMAP
     xen_map_cache_init(xen_phys_offset_to_gaddr, state);
@@ -865,6 +887,7 @@ static void xen_do_ioreq_register(XenIOState *state,
 }
 
 void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
+                        uint8_t handle_bufioreq,
                         const MemoryListener *xen_memory_listener)
 {
     int rc;
@@ -883,7 +906,8 @@ void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
         goto err;
     }
 
-    rc = xen_create_ioreq_server(xen_domid, &state->ioservid);
+    state->has_bufioreq = handle_bufioreq != HVM_IOREQSRV_BUFIOREQ_OFF;
+    rc = xen_create_ioreq_server(xen_domid, handle_bufioreq, &state->ioservid);
     if (!rc) {
         xen_do_ioreq_register(state, max_cpus, xen_memory_listener);
     } else {
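A note on the mapping logic in xen_map_ioreq_server() above (a reading of this diff, not text from the commit): the QEMU_BUILD_BUG_ON()s pin frame 0 of the ioreq-server resource to the buffered ioreq page and frame 1 to the synchronous ioreq page, so the new frame/num_frames values select one of two layouts for the mapping returned in addr.

    /*
     * has_bufioreq == true   (frame = 0, num_frames = 2):
     *   addr + 0             -> buffered ioreq page     (state->buffered_io_page)
     *   addr + XC_PAGE_SIZE  -> synchronous ioreq page  (state->shared_page)
     *
     * has_bufioreq == false  (frame = 1, num_frames = 1):
     *   addr + 0             -> synchronous ioreq page  (state->shared_page)
     */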
hw/xen/xen-pvh-common.c (3 additions, 1 deletion)

@@ -194,7 +194,9 @@ static void xen_pvh_init(MachineState *ms)
     }
 
     xen_pvh_init_ram(s, sysmem);
-    xen_register_ioreq(&s->ioreq, ms->smp.max_cpus, &xen_memory_listener);
+    xen_register_ioreq(&s->ioreq, ms->smp.max_cpus,
+                       HVM_IOREQSRV_BUFIOREQ_ATOMIC,
+                       &xen_memory_listener);
 
     if (s->cfg.virtio_mmio_num) {
         xen_create_virtio_mmio_devices(s);
include/hw/xen/xen-hvm-common.h (3 additions, 0 deletions)

@@ -81,6 +81,8 @@ typedef struct XenIOState {
     QLIST_HEAD(, XenPciDevice) dev_list;
     DeviceListener device_listener;
 
+    bool has_bufioreq;
+
     Notifier exit;
 } XenIOState;
 
@@ -95,6 +97,7 @@ void xen_device_unrealize(DeviceListener *listener, DeviceState *dev);
 
 void xen_hvm_change_state_handler(void *opaque, bool running, RunState rstate);
 void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
+                        uint8_t handle_bufioreq,
                         const MemoryListener *xen_memory_listener);
 
 void cpu_ioreq_pio(ioreq_t *req);
include/hw/xen/xen_native.h (2 additions, 1 deletion)

@@ -464,10 +464,11 @@ static inline void xen_unmap_pcidev(domid_t dom,
 }
 
 static inline int xen_create_ioreq_server(domid_t dom,
+                                          int handle_bufioreq,
                                           ioservid_t *ioservid)
 {
     int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
-                                                HVM_IOREQSRV_BUFIOREQ_ATOMIC,
+                                                handle_bufioreq,
                                                 ioservid);
 
     if (rc == 0) {
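For reference (values from Xen's public hvm_op.h, not shown in this diff): handle_bufioreq is now passed straight through to xendevicemodel_create_ioreq_server(), so the accepted values are the HVM_IOREQSRV_BUFIOREQ_* constants below. Only HVM_IOREQSRV_BUFIOREQ_OFF is treated as "no buffered ioreqs" by the new state->has_bufioreq flag.

    /* From Xen's public hvm_op.h (for reference; not part of this diff): */
    #define HVM_IOREQSRV_BUFIOREQ_OFF    0  /* no buffered ioreq ring              */
    #define HVM_IOREQSRV_BUFIOREQ_LEGACY 1  /* buffered ring, non-atomic pointers  */
    #define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2  /* buffered ring, atomic pointer reads */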
