Re: [Xen-devel] [RFC Patch v4 1/8] ioreq: remove most 'buf' parameter from static functions
On Wed, Dec 06, 2017 at 02:44:52PM +0000, Paul Durrant wrote: >> -Original Message- >> From: Chao Gao [mailto:chao@intel.com] >> Sent: 06 December 2017 07:50 >> To: xen-de...@lists.xen.org >> Cc: Chao Gao; Andrew Cooper >> ; Jan Beulich ; Paul >> Durrant >> Subject: [RFC Patch v4 1/8] ioreq: remove most 'buf' parameter from static >> functions >> >> It is a preparation to support multiple IOREQ pages. >> No functional change. >> >> Signed-off-by: Chao Gao >> --- >> v4: >> -new >> --- >> xen/arch/x86/hvm/ioreq.c | 48 +++-- >> --- >> 1 file changed, 23 insertions(+), 25 deletions(-) >> >> diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c >> index d991ac9..a879f20 100644 >> --- a/xen/arch/x86/hvm/ioreq.c >> +++ b/xen/arch/x86/hvm/ioreq.c >> @@ -237,10 +237,9 @@ static void hvm_free_ioreq_gfn(struct >> hvm_ioreq_server *s, gfn_t gfn) >> set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask); >> } >> >> -static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf) >> +static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, >> +struct hvm_ioreq_page *iorp) >> { >> -struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq; >> - > >I don't really like this approach. I'd prefer swapping the bool for an >unsigned page index, where we follow the convention adopted in >hvm_get_ioreq_server_frame() for which macros exist: 0 equating to the >bufioreq page, 1+ for the struct-per-cpu pages. Ok. I have no preference for these two. But I will take your advice. Thanks Chao ___ Xen-devel mailing list Xen-devel@lists.xenproject.org https://lists.xenproject.org/mailman/listinfo/xen-devel
Re: [Xen-devel] [RFC Patch v4 1/8] ioreq: remove most 'buf' parameter from static functions
> -Original Message- > From: Chao Gao [mailto:chao@intel.com] > Sent: 06 December 2017 07:50 > To: xen-de...@lists.xen.org > Cc: Chao Gao; Andrew Cooper > ; Jan Beulich ; Paul > Durrant > Subject: [RFC Patch v4 1/8] ioreq: remove most 'buf' parameter from static > functions > > It is a preparation to support multiple IOREQ pages. > No functional change. > > Signed-off-by: Chao Gao > --- > v4: > -new > --- > xen/arch/x86/hvm/ioreq.c | 48 +++-- > --- > 1 file changed, 23 insertions(+), 25 deletions(-) > > diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c > index d991ac9..a879f20 100644 > --- a/xen/arch/x86/hvm/ioreq.c > +++ b/xen/arch/x86/hvm/ioreq.c > @@ -237,10 +237,9 @@ static void hvm_free_ioreq_gfn(struct > hvm_ioreq_server *s, gfn_t gfn) > set_bit(i, >arch.hvm_domain.ioreq_gfn.mask); > } > > -static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf) > +static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, > +struct hvm_ioreq_page *iorp) > { > -struct hvm_ioreq_page *iorp = buf ? >bufioreq : >ioreq; > - I don't really like this approach. I'd prefer swapping the bool for an unsigned page index, where we follow the convention adopted in hvm_get_ioreq_server_frame() for which macros exist: 0 equating to the bufioreq page, 1+ for the struct-per-cpu pages. Paul > if ( gfn_eq(iorp->gfn, INVALID_GFN) ) > return; > > @@ -289,15 +288,15 @@ static int hvm_map_ioreq_gfn(struct > hvm_ioreq_server *s, bool buf) > >va); > > if ( rc ) > -hvm_unmap_ioreq_gfn(s, buf); > +hvm_unmap_ioreq_gfn(s, iorp); > > return rc; > } > > -static int hvm_alloc_ioreq_mfn(struct hvm_ioreq_server *s, bool buf) > +static int hvm_alloc_ioreq_mfn(struct hvm_ioreq_server *s, > + struct hvm_ioreq_page *iorp) > { > struct domain *currd = current->domain; > -struct hvm_ioreq_page *iorp = buf ? 
>bufioreq : >ioreq; > > if ( iorp->page ) > { > @@ -344,10 +343,9 @@ static int hvm_alloc_ioreq_mfn(struct > hvm_ioreq_server *s, bool buf) > return 0; > } > > -static void hvm_free_ioreq_mfn(struct hvm_ioreq_server *s, bool buf) > +static void hvm_free_ioreq_mfn(struct hvm_ioreq_server *s, > + struct hvm_ioreq_page *iorp) > { > -struct hvm_ioreq_page *iorp = buf ? >bufioreq : >ioreq; > - > if ( !iorp->page ) > return; > > @@ -380,11 +378,11 @@ bool is_ioreq_server_page(struct domain *d, const > struct page_info *page) > return found; > } > > -static void hvm_remove_ioreq_gfn(struct hvm_ioreq_server *s, bool buf) > +static void hvm_remove_ioreq_gfn(struct hvm_ioreq_server *s, > + struct hvm_ioreq_page *iorp) > > { > struct domain *d = s->domain; > -struct hvm_ioreq_page *iorp = buf ? >bufioreq : >ioreq; > > if ( IS_DEFAULT(s) || gfn_eq(iorp->gfn, INVALID_GFN) ) > return; > @@ -395,10 +393,10 @@ static void hvm_remove_ioreq_gfn(struct > hvm_ioreq_server *s, bool buf) > clear_page(iorp->va); > } > > -static int hvm_add_ioreq_gfn(struct hvm_ioreq_server *s, bool buf) > +static int hvm_add_ioreq_gfn(struct hvm_ioreq_server *s, > + struct hvm_ioreq_page *iorp) > { > struct domain *d = s->domain; > -struct hvm_ioreq_page *iorp = buf ? 
>bufioreq : >ioreq; > int rc; > > if ( IS_DEFAULT(s) || gfn_eq(iorp->gfn, INVALID_GFN) ) > @@ -550,36 +548,36 @@ static int hvm_ioreq_server_map_pages(struct > hvm_ioreq_server *s) > rc = hvm_map_ioreq_gfn(s, true); > > if ( rc ) > -hvm_unmap_ioreq_gfn(s, false); > +hvm_unmap_ioreq_gfn(s, >ioreq); > > return rc; > } > > static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s) > { > -hvm_unmap_ioreq_gfn(s, true); > -hvm_unmap_ioreq_gfn(s, false); > +hvm_unmap_ioreq_gfn(s, >ioreq); > +hvm_unmap_ioreq_gfn(s, >bufioreq); > } > > static int hvm_ioreq_server_alloc_pages(struct hvm_ioreq_server *s) > { > int rc; > > -rc = hvm_alloc_ioreq_mfn(s, false); > +rc = hvm_alloc_ioreq_mfn(s, >ioreq); > > if ( !rc && (s->bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF) ) > -rc = hvm_alloc_ioreq_mfn(s, true); > +rc = hvm_alloc_ioreq_mfn(s, >bufioreq); > > if ( rc ) > -hvm_free_ioreq_mfn(s, false); > +hvm_free_ioreq_mfn(s, >ioreq); > > return rc; > } > > static void hvm_ioreq_server_free_pages(struct hvm_ioreq_server *s) > { > -hvm_free_ioreq_mfn(s, true); > -hvm_free_ioreq_mfn(s, false); > +hvm_free_ioreq_mfn(s, >bufioreq); > +hvm_free_ioreq_mfn(s, >ioreq); > } > > static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s) >
[Xen-devel] [RFC Patch v4 1/8] ioreq: remove most 'buf' parameter from static functions
It is a preparation to support multiple IOREQ pages. No functional change. Signed-off-by: Chao Gao--- v4: -new --- xen/arch/x86/hvm/ioreq.c | 48 +++- 1 file changed, 23 insertions(+), 25 deletions(-) diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c index d991ac9..a879f20 100644 --- a/xen/arch/x86/hvm/ioreq.c +++ b/xen/arch/x86/hvm/ioreq.c @@ -237,10 +237,9 @@ static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s, gfn_t gfn) set_bit(i, >arch.hvm_domain.ioreq_gfn.mask); } -static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf) +static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, +struct hvm_ioreq_page *iorp) { -struct hvm_ioreq_page *iorp = buf ? >bufioreq : >ioreq; - if ( gfn_eq(iorp->gfn, INVALID_GFN) ) return; @@ -289,15 +288,15 @@ static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf) >va); if ( rc ) -hvm_unmap_ioreq_gfn(s, buf); +hvm_unmap_ioreq_gfn(s, iorp); return rc; } -static int hvm_alloc_ioreq_mfn(struct hvm_ioreq_server *s, bool buf) +static int hvm_alloc_ioreq_mfn(struct hvm_ioreq_server *s, + struct hvm_ioreq_page *iorp) { struct domain *currd = current->domain; -struct hvm_ioreq_page *iorp = buf ? >bufioreq : >ioreq; if ( iorp->page ) { @@ -344,10 +343,9 @@ static int hvm_alloc_ioreq_mfn(struct hvm_ioreq_server *s, bool buf) return 0; } -static void hvm_free_ioreq_mfn(struct hvm_ioreq_server *s, bool buf) +static void hvm_free_ioreq_mfn(struct hvm_ioreq_server *s, + struct hvm_ioreq_page *iorp) { -struct hvm_ioreq_page *iorp = buf ? >bufioreq : >ioreq; - if ( !iorp->page ) return; @@ -380,11 +378,11 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page) return found; } -static void hvm_remove_ioreq_gfn(struct hvm_ioreq_server *s, bool buf) +static void hvm_remove_ioreq_gfn(struct hvm_ioreq_server *s, + struct hvm_ioreq_page *iorp) { struct domain *d = s->domain; -struct hvm_ioreq_page *iorp = buf ? 
>bufioreq : >ioreq; if ( IS_DEFAULT(s) || gfn_eq(iorp->gfn, INVALID_GFN) ) return; @@ -395,10 +393,10 @@ static void hvm_remove_ioreq_gfn(struct hvm_ioreq_server *s, bool buf) clear_page(iorp->va); } -static int hvm_add_ioreq_gfn(struct hvm_ioreq_server *s, bool buf) +static int hvm_add_ioreq_gfn(struct hvm_ioreq_server *s, + struct hvm_ioreq_page *iorp) { struct domain *d = s->domain; -struct hvm_ioreq_page *iorp = buf ? >bufioreq : >ioreq; int rc; if ( IS_DEFAULT(s) || gfn_eq(iorp->gfn, INVALID_GFN) ) @@ -550,36 +548,36 @@ static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s) rc = hvm_map_ioreq_gfn(s, true); if ( rc ) -hvm_unmap_ioreq_gfn(s, false); +hvm_unmap_ioreq_gfn(s, >ioreq); return rc; } static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s) { -hvm_unmap_ioreq_gfn(s, true); -hvm_unmap_ioreq_gfn(s, false); +hvm_unmap_ioreq_gfn(s, >ioreq); +hvm_unmap_ioreq_gfn(s, >bufioreq); } static int hvm_ioreq_server_alloc_pages(struct hvm_ioreq_server *s) { int rc; -rc = hvm_alloc_ioreq_mfn(s, false); +rc = hvm_alloc_ioreq_mfn(s, >ioreq); if ( !rc && (s->bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF) ) -rc = hvm_alloc_ioreq_mfn(s, true); +rc = hvm_alloc_ioreq_mfn(s, >bufioreq); if ( rc ) -hvm_free_ioreq_mfn(s, false); +hvm_free_ioreq_mfn(s, >ioreq); return rc; } static void hvm_ioreq_server_free_pages(struct hvm_ioreq_server *s) { -hvm_free_ioreq_mfn(s, true); -hvm_free_ioreq_mfn(s, false); +hvm_free_ioreq_mfn(s, >bufioreq); +hvm_free_ioreq_mfn(s, >ioreq); } static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s) @@ -646,8 +644,8 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s) if ( s->enabled ) goto done; -hvm_remove_ioreq_gfn(s, false); -hvm_remove_ioreq_gfn(s, true); +hvm_remove_ioreq_gfn(s, >ioreq); +hvm_remove_ioreq_gfn(s, >bufioreq); s->enabled = true; @@ -667,8 +665,8 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s) if ( !s->enabled ) goto done; -hvm_add_ioreq_gfn(s, true); 
-hvm_add_ioreq_gfn(s, false); +hvm_add_ioreq_gfn(s, &s->bufioreq); +hvm_add_ioreq_gfn(s, &s->ioreq); s->enabled = false; -- 1.8.3.1 ___ Xen-devel mailing list Xen-devel@lists.xenproject.org https://lists.xenproject.org/mailman/listinfo/xen-devel