In particular use grant_ref_t and grant_handle_t where appropriate.
Also switch other nearby type uses to their canonical variants where
appropriate and introduce INVALID_MAPTRACK_HANDLE.
Signed-off-by: Jan Beulich
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -96,7 +96,7 @@ struct gnttab_unmap_common {
int16_t status;
/* Shared state beteen *_unmap and *_unmap_complete */
-u16 done;
+uint16_t done;
unsigned long frame;
struct domain *rd;
grant_ref_t ref;
@@ -118,11 +118,11 @@ struct gnttab_unmap_common {
* table of these, indexes into which are returned as a 'mapping handle'.
*/
struct grant_mapping {
-u32 ref; /* grant ref */
-u16 flags; /* 0-4: GNTMAP_* ; 5-15: unused */
+grant_ref_t ref;/* grant ref */
+uint16_t flags; /* 0-4: GNTMAP_* ; 5-15: unused */
domid_t domid; /* granting domain */
-u32 vcpu; /* vcpu which created the grant mapping */
-u32 pad; /* round size to a power of 2 */
+uint32_t vcpu; /* vcpu which created the grant mapping */
+uint32_t pad; /* round size to a power of 2 */
};
#define MAPTRACK_PER_PAGE (PAGE_SIZE / sizeof(struct grant_mapping))
@@ -158,10 +158,10 @@ shared_entry_header(struct grant_table *
/* Active grant entry - used for shadowing GTF_permit_access grants. */
struct active_grant_entry {
-u32 pin;/* Reference count information. */
+uint32_t pin;/* Reference count information. */
domid_t domid; /* Domain being granted access. */
struct domain *trans_domain;
-uint32_t trans_gref;
+grant_ref_t trans_gref;
unsigned long frame; /* Frame being granted. */
unsigned long gfn;/* Guest's idea of the frame being granted. */
unsigned is_sub_page:1; /* True if this is a sub-page grant. */
@@ -297,7 +297,9 @@ double_gt_unlock(struct grant_table *lgt
grant_write_unlock(rgt);
}
-static inline int
+#define INVALID_MAPTRACK_HANDLE UINT_MAX
+
+static inline grant_handle_t
__get_maptrack_handle(
struct grant_table *t,
struct vcpu *v)
@@ -312,7 +314,7 @@ __get_maptrack_handle(
if ( unlikely(head == MAPTRACK_TAIL) )
{
spin_unlock(&t->maptrack_freelist_lock);
-return -1;
+return INVALID_MAPTRACK_HANDLE;
}
/*
@@ -323,7 +325,7 @@ __get_maptrack_handle(
if ( unlikely(next == MAPTRACK_TAIL) )
{
spin_unlock(&t->maptrack_freelist_lock);
-return -1;
+return INVALID_MAPTRACK_HANDLE;
}
prev_head = head;
@@ -345,8 +347,8 @@ __get_maptrack_handle(
* each VCPU and to avoid two VCPU repeatedly stealing entries from
* each other, the initial victim VCPU is selected randomly.
*/
-static int steal_maptrack_handle(struct grant_table *t,
- const struct vcpu *curr)
+static grant_handle_t steal_maptrack_handle(struct grant_table *t,
+const struct vcpu *curr)
{
const struct domain *currd = curr->domain;
unsigned int first, i;
@@ -357,10 +359,10 @@ static int steal_maptrack_handle(struct
do {
if ( currd->vcpu[i] )
{
-int handle;
+grant_handle_t handle;
handle = __get_maptrack_handle(t, currd->vcpu[i]);
-if ( handle != -1 )
+if ( handle != INVALID_MAPTRACK_HANDLE )
{
maptrack_entry(t, handle).vcpu = curr->vcpu_id;
return handle;
@@ -373,12 +375,12 @@ static int steal_maptrack_handle(struct
} while ( i != first );
/* No free handles on any VCPU. */
-return -1;
+return INVALID_MAPTRACK_HANDLE;
}
static inline void
put_maptrack_handle(
-struct grant_table *t, int handle)
+struct grant_table *t, grant_handle_t handle)
{
struct domain *currd = current->domain;
struct vcpu *v;
@@ -404,7 +406,7 @@ put_maptrack_handle(
spin_unlock(&t->maptrack_freelist_lock);
}
-static inline int
+static inline grant_handle_t
get_maptrack_handle(
struct grant_table *lgt)
{
@@ -414,7 +416,7 @@ get_maptrack_handle(
struct grant_mapping *new_mt = NULL;
handle = __get_maptrack_handle(lgt, curr);
-if ( likely(handle != -1) )
+if ( likely(handle != INVALID_MAPTRACK_HANDLE) )
return handle;
spin_lock(&lgt->maptrack_lock);
@@ -439,8 +441,8 @@ get_maptrack_handle(
if ( curr->maptrack_tail == MAPTRACK_TAIL )
{
handle = steal_maptrack_handle(lgt, curr);
-if ( handle == -1 )
-return -1;
+if ( handle == INVALID_MAPTRACK_HANDLE )
+return handle;
spin_lock(&lgt->maptrack_freelist_lock);
maptrack_entry(lgt,