Diffstat (limited to 'include/xen')
-rw-r--r--   include/xen/acpi.h                     16
-rw-r--r--   include/xen/events.h                    3
-rw-r--r--   include/xen/hvm.h                       4
-rw-r--r--   include/xen/interface/io/blkif.h       53
-rw-r--r--   include/xen/interface/io/netif.h       12
-rw-r--r--   include/xen/interface/io/protocols.h    2
-rw-r--r--   include/xen/interface/io/ring.h         5
7 files changed, 91 insertions, 4 deletions
diff --git a/include/xen/acpi.h b/include/xen/acpi.h
index 68d73d09b77..46aa3d1c165 100644
--- a/include/xen/acpi.h
+++ b/include/xen/acpi.h
@@ -78,11 +78,25 @@ static inline int xen_acpi_get_pxm(acpi_handle h)
 int xen_acpi_notify_hypervisor_state(u8 sleep_state,
				     u32 pm1a_cnt, u32 pm1b_cnd);
 
+static inline int xen_acpi_suspend_lowlevel(void)
+{
+	/*
+	 * Xen will save and restore CPU context, so
+	 * we can skip that and just go straight to
+	 * the suspend.
+	 */
+	acpi_enter_sleep_state(ACPI_STATE_S3);
+	return 0;
+}
+
 static inline void xen_acpi_sleep_register(void)
 {
-	if (xen_initial_domain())
+	if (xen_initial_domain()) {
 		acpi_os_set_prepare_sleep(
			&xen_acpi_notify_hypervisor_state);
+
+		acpi_suspend_lowlevel = xen_acpi_suspend_lowlevel;
+	}
 }
 #else
 static inline void xen_acpi_sleep_register(void)
diff --git a/include/xen/events.h b/include/xen/events.h
index b2b27c6a0f7..c9ea10ee227 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -76,6 +76,9 @@ unsigned irq_from_evtchn(unsigned int evtchn);
 
 /* Xen HVM evtchn vector callback */
 void xen_hvm_callback_vector(void);
+#ifdef CONFIG_TRACING
+#define trace_xen_hvm_callback_vector xen_hvm_callback_vector
+#endif
 extern int xen_have_vector_callback;
 int xen_set_callback_via(uint64_t via);
 void xen_evtchn_do_upcall(struct pt_regs *regs);
diff --git a/include/xen/hvm.h b/include/xen/hvm.h
index 13e43e41637..63917a8de3b 100644
--- a/include/xen/hvm.h
+++ b/include/xen/hvm.h
@@ -44,8 +44,8 @@ static inline int hvm_get_parameter(int idx, uint64_t *value)
 	xhv.index = idx;
 	r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
 	if (r < 0) {
-		printk(KERN_ERR "Cannot get hvm parameter %s (%d): %d!\n",
-			param_name(idx), idx, r);
+		pr_err("Cannot get hvm parameter %s (%d): %d!\n",
+		       param_name(idx), idx, r);
 		return r;
 	}
 	*value = xhv.value;
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index ffd4652de91..65e12099ef8 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -103,12 +103,46 @@ typedef uint64_t blkif_sector_t;
 #define BLKIF_OP_DISCARD           5
 
 /*
+ * Recognized if "feature-max-indirect-segments" is present in the backend
+ * xenbus info. The "feature-max-indirect-segments" node contains the maximum
+ * number of segments allowed by the backend per request. If the node is
+ * present, the frontend might use blkif_request_indirect structs in order to
+ * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The
+ * maximum number of indirect segments is fixed by the backend, but the
+ * frontend can issue requests with any number of indirect segments as long as
+ * it's less than the number provided by the backend. The indirect_grefs field
+ * in blkif_request_indirect should be filled by the frontend with the
+ * grant references of the pages that are holding the indirect segments.
+ * These pages are filled with an array of blkif_request_segment_aligned
+ * structs that hold the information about the segments. The number of
+ * indirect pages to use is determined by the maximum number of segments
+ * an indirect request contains. Every indirect page can contain a maximum
+ * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)),
+ * so to calculate the number of indirect pages needed we have to compute
+ * ceil(indirect_segments/512).
+ *
+ * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
+ * create the "feature-max-indirect-segments" node!
+ */
+#define BLKIF_OP_INDIRECT          6
+
+/*
  * Maximum scatter/gather segments per request.
  * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE.
  * NB. This could be 12 if the ring indexes weren't stored in the same page.
  */
 #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
 
+#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
+
+struct blkif_request_segment_aligned {
+	grant_ref_t gref;	/* reference to I/O buffer frame */
+	/* @first_sect: first sector in frame to transfer (inclusive).   */
+	/* @last_sect: last sector in frame to transfer (inclusive).     */
+	uint8_t     first_sect, last_sect;
+	uint16_t    _pad;	/* padding to make it 8 bytes, so it's cache-aligned */
+} __attribute__((__packed__));
+
 struct blkif_request_rw {
 	uint8_t        nr_segments;  /* number of segments */
 	blkif_vdev_t   handle;       /* only for read/write requests */
@@ -147,12 +181,31 @@ struct blkif_request_other {
 	uint64_t       id;           /* private guest value, echoed in resp */
 } __attribute__((__packed__));
 
+struct blkif_request_indirect {
+	uint8_t        indirect_op;
+	uint16_t       nr_segments;
+#ifdef CONFIG_X86_64
+	uint32_t       _pad1;        /* offsetof(blkif_...,u.indirect.id) == 8 */
+#endif
+	uint64_t       id;
+	blkif_sector_t sector_number;
+	blkif_vdev_t   handle;
+	uint16_t       _pad2;
+	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
+#ifdef CONFIG_X86_64
+	uint32_t       _pad3;        /* make it 64 byte aligned */
+#else
+	uint64_t       _pad3;        /* make it 64 byte aligned */
+#endif
+} __attribute__((__packed__));
+
 struct blkif_request {
 	uint8_t        operation;    /* BLKIF_OP_??? */
 	union {
 		struct blkif_request_rw rw;
 		struct blkif_request_discard discard;
 		struct blkif_request_other other;
+		struct blkif_request_indirect indirect;
 	} u;
 } __attribute__((__packed__));
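The ceil(indirect_segments/512) rule described in the comment above maps directly to code. A minimal sketch, assuming 4 KiB pages and the kernel's DIV_ROUND_UP() helper; SEGS_PER_INDIRECT_FRAME and indirect_pages_needed() are hypothetical names for illustration, not part of this patch:

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

/* Segments per indirect page: PAGE_SIZE / 8-byte entries == 512 with 4 KiB pages. */
#define SEGS_PER_INDIRECT_FRAME \
	(PAGE_SIZE / sizeof(struct blkif_request_segment_aligned))

/* Hypothetical helper: indirect grant pages a frontend must supply for nr_segs. */
static inline unsigned int indirect_pages_needed(unsigned int nr_segs)
{
	return DIV_ROUND_UP(nr_segs, SEGS_PER_INDIRECT_FRAME);
}

So 512 segments fit in one indirect page, 513 need two, and the BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST limit of 8 pages caps a single request at 4096 segments.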
diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
index 3ef3fe05ee9..eb262e3324d 100644
--- a/include/xen/interface/io/netif.h
+++ b/include/xen/interface/io/netif.h
@@ -38,6 +38,18 @@
  * that it cannot safely queue packets (as it may not be kicked to send them).
  */
 
+/*
+ * "feature-split-event-channels" is introduced to separate guest TX
+ * and RX notification. The backend either does not support this feature,
+ * or advertises it via xenstore as 0 (disabled) or 1 (enabled).
+ *
+ * To make use of this feature, the frontend should allocate two event
+ * channels for TX and RX, and advertise them to the backend as
+ * "event-channel-tx" and "event-channel-rx" respectively. If the frontend
+ * doesn't want to use this feature, it just writes the "event-channel"
+ * node as before.
+ */
+
 /*
  * This is the 'wire' format for packets:
  *  Request 1: xen_netif_tx_request  -- XEN_NETTXF_* (any flags)
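A hedged sketch of the frontend side of this negotiation: advertise_evtchns() is a hypothetical helper, while xenbus_printf() and struct xenbus_device are the existing xenbus API; tx_evtchn and rx_evtchn are assumed to be event-channel ports the frontend has already bound.

#include <xen/xenbus.h>

static int advertise_evtchns(struct xenbus_transaction xbt,
			     struct xenbus_device *dev,
			     unsigned int tx_evtchn, unsigned int rx_evtchn)
{
	int err;

	/* Feature unused or unsupported: write the single node, as before. */
	if (tx_evtchn == rx_evtchn)
		return xenbus_printf(xbt, dev->nodename,
				     "event-channel", "%u", tx_evtchn);

	/* Split channels: advertise one node per direction. */
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel-tx", "%u", tx_evtchn);
	if (err)
		return err;
	return xenbus_printf(xbt, dev->nodename,
			     "event-channel-rx", "%u", rx_evtchn);
}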
diff --git a/include/xen/interface/io/protocols.h b/include/xen/interface/io/protocols.h
index 0eafaf254ff..056744b4b05 100644
--- a/include/xen/interface/io/protocols.h
+++ b/include/xen/interface/io/protocols.h
@@ -15,7 +15,7 @@
 # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64
 #elif defined(__powerpc64__)
 # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_POWERPC64
-#elif defined(__arm__)
+#elif defined(__arm__) || defined(__aarch64__)
 # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_ARM
 #else
 # error arch fixup needed here
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
index 75271b9a8f6..7d28aff605c 100644
--- a/include/xen/interface/io/ring.h
+++ b/include/xen/interface/io/ring.h
@@ -188,6 +188,11 @@ struct __name##_back_ring {				\
 #define RING_REQUEST_CONS_OVERFLOW(_r, _cons)			\
     (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
 
+/* Ill-behaved frontend determination: Can there be this many requests? */
+#define RING_REQUEST_PROD_OVERFLOW(_r, _prod)			\
+    (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
+
+
 #define RING_PUSH_REQUESTS(_r) do {				\
     wmb(); /* back sees requests /before/ updated producer index */	\
     (_r)->sring->req_prod = (_r)->req_prod_pvt;			\
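RING_REQUEST_PROD_OVERFLOW() is meant for the backend. Note the strict '>': outstanding requests are req_prod minus the backend's private rsp_prod_pvt, and a completely full ring (RING_SIZE outstanding requests) is legal, so only a producer index that has run one or more slots past that marks the frontend as ill-behaved. A usage sketch, assuming a ring declared with DEFINE_RING_TYPES(demo, ...); the helper name and error handling are illustrative only, not part of this patch:

#include <linux/errno.h>
#include <asm/barrier.h>	/* rmb() */

/* Hypothetical backend check, run before consuming a batch of requests. */
static int demo_check_producer(struct demo_back_ring *ring)
{
	RING_IDX rp = ring->sring->req_prod;

	rmb();	/* read the producer index before any request payloads */
	if (RING_REQUEST_PROD_OVERFLOW(ring, rp))
		return -EACCES;	/* ill-behaved frontend: disconnect it */
	return 0;
}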