On Sat, Mar 17, 2012 at 9:34 AM, Ronald S. Bultje <[email protected]> wrote:
> ---
>  libavcodec/x86/cabac.h |   17 ++++++++++-------
>  1 files changed, 10 insertions(+), 7 deletions(-)
>
> diff --git a/libavcodec/x86/cabac.h b/libavcodec/x86/cabac.h
> index 3c3652d..c4832c3 100644
> --- a/libavcodec/x86/cabac.h
> +++ b/libavcodec/x86/cabac.h
> @@ -105,8 +105,8 @@ static av_always_inline int get_cabac_bypass_sign_x86(CABACContext *c, int val)
>  {
>      x86_reg tmp;
>      __asm__ volatile(
> -        "movl %4, %k1 \n\t"
> -        "movl %2, %%eax \n\t"
> +        "movl %c5(%2), %k1 \n\t"
> +        "movl %c3(%2), %%eax \n\t"
>          "shl $17, %k1 \n\t"
>          "add %%eax, %%eax \n\t"
>          "sub %k1, %%eax \n\t"
> @@ -117,7 +117,7 @@ static av_always_inline int get_cabac_bypass_sign_x86(CABACContext *c, int val)
>          "sub %%edx, %%ecx \n\t"
>          "test %%ax, %%ax \n\t"
>          " jnz 1f \n\t"
> -        "mov %3, %1 \n\t"
> +        "mov %c4(%2), %1 \n\t"
>          "subl $0xFFFF, %%eax \n\t"
>          "movzwl (%1), %%edx \n\t"
>          "bswap %%edx \n\t"
> @@ -126,11 +126,14 @@ static av_always_inline int get_cabac_bypass_sign_x86(CABACContext *c, int val)
>          "addl %%edx, %%eax \n\t"
>          "mov %1, %3 \n\t"
>          "1: \n\t"
> -        "movl %%eax, %2 \n\t"
> +        "movl %%eax, %c4(%2) \n\t"
>
> -        :"+c"(val), "=&r"(tmp), "+m"(c->low), "+m"(c->bytestream)
> -        :"m"(c->range)
> -        : "%eax", "%edx"
> +        : "+c"(val), "=&r"(tmp)
> +        : "r"(c),
> +          "i"(offsetof(CABACContext, low)),
> +          "i"(offsetof(CABACContext, bytestream)),
> +          "i"(offsetof(CABACContext, range))
> +        : "%eax", "%edx", "memory"
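[For context, the %cN(%reg)/offsetof idiom the patch switches to works roughly like the minimal sketch below. This is an illustrative, stand-alone example and not Libav code: MiniCtx and double_low_offsetof are made-up names, and only a single "low" field is touched.]

#include <stddef.h>

/* Cut-down, hypothetical stand-in for CABACContext. */
typedef struct {
    int low;
    int range;
} MiniCtx;

/* The context pointer is passed in a register ("r") and the member
 * offset as an immediate ("i"); the %c modifier prints that immediate
 * without the '$' prefix, so %c1(%0) expands to e.g. 0(%rdi).  Because
 * the compiler no longer sees an explicit "m" operand for c->low, a
 * blanket "memory" clobber is added -- the part the reply below
 * objects to. */
static inline void double_low_offsetof(MiniCtx *c)
{
    __asm__ volatile(
        "movl %c1(%0), %%eax \n\t"   /* eax = c->low  */
        "addl %%eax, %%eax   \n\t"   /* eax *= 2      */
        "movl %%eax, %c1(%0) \n\t"   /* c->low = eax  */
        :
        : "r"(c), "i"(offsetof(MiniCtx, low))
        : "%eax", "memory");
}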
IMO clobbering memory looks very very hacky, and I don't like it. If you need to clobber something, it'd be much better if we could clobber exactly what needs clobbering, and nothing more.

Jason
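[For illustration, "clobber exactly what needs clobbering" would mean naming the touched objects as memory operands instead of using the blanket "memory" clobber, roughly as in this hedged sketch. Again the names are made up; this is not the actual get_cabac_bypass_sign_x86.]

#include <stddef.h>

/* Same hypothetical struct as in the sketch above. */
typedef struct {
    int low;
    int range;
} MiniCtx;

/* The only object the asm modifies is listed as a "+m" operand, so the
 * compiler merely assumes c->low changed -- no "memory" clobber, and
 * every other value it has cached in registers stays valid across the
 * asm statement. */
static inline void double_low_exact(MiniCtx *c)
{
    __asm__ volatile(
        "movl %0, %%eax    \n\t"   /* eax = c->low  */
        "addl %%eax, %%eax \n\t"   /* eax *= 2      */
        "movl %%eax, %0    \n\t"   /* c->low = eax  */
        : "+m"(c->low)
        :
        : "%eax");
}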
