Hi all, Can gatekeeper help review for a fix for bug860. https://bugs.open64.net/show_bug.cgi?id=860
Problem: the test case buffer.c boot-fails at line 60:
58 for (i = 0; i < 8; i++)
59 {
60 struct buffer_head *bh = lru->bhs[i];
when compile at O2:
112 # 60 struct buffer_head *bh = lru->bhs[i];
113 movq 0(%rax),%r8 # [0] id:56 #kernel
boot fail at here
%rax come from:
72 # 45 lru = &(*(
73 # 46 {
74 # 47 unsigned long __ptr;
75 # 48 __asm__ ("": "=r" (__ptr):"0" ((&per_cpu__bh_lrus)));
76 movq $(.data.percpu),%rax # [0] .data.percpu
77
78 .LBB3_lookup_bh_lru:
#here should have a move from %rax to %r11
79 movq %gs:per_cpu__this_cpu_off(%rip),%r8
80 .LBB4_lookup_bh_lru:
81 .loc 1 58 0
82 # 54 ret__;}
83 # 55 ))));}
84 # 56 ));
85 # 57
86 # 58 for (i = 0; i < 8; i++)
87 movq 32(%rsp),%r9 # [0] bdev
88 movq 40(%rsp),%r10 # [0] block
89 .loc 1 54 0
90 leaq 0(%r8,%r11,1), %rdx # [0] %r11 is
never defined before
91 xorq %rsi,%rsi # [1]
92 xorl %ecx,%ecx # [1]
93 movq %r8,.bss+8(%rip) # [1] id:54 anon2+0x0
94 .loc 1 58 0
95 movq %rdx,%rax # [2]
The root cause is that "[ 48, 0] GTN152 :- mov64 GTN151 ; copy" is deleted
in EBO_Remove_Unused_Ops, even though it has a use in BB4 and therefore should not be deleted.
// Block: 3 Pred: 2 Succ: 4
[ 48, 0] GTN152 :- mov64 GTN151 ; copy //deleted at
EBO_Remove_Unused_Ops
|||||[53] asm ("mov" "q " "%%" "gs" ":%P" "1" ",%0": "=r" (ret__):"m"
(per_cpu__this_cpu_off));
[ 53, 0] GTN153 :- asm (sym:per_cpu__this_cpu_off+0) ; side_effects
// Block: 4 Pred: 3 Succ: 5
-----------------------------------------------------------------------
|||||[54] ret__;}
[ 54, 0] :- store64 GTN153 GTN34(%rip) (sym:.bss+8) ; WN id:54 anon2+0x0
[ 54, 0] GTN156 :- leax64 GTN153 GTN152 (0x1) (0x0) ;
[ 54, 0] :- store64 GTN156 GTN34(%rip) (sym:.bss+0) ; WN id:55 anon1+0x0
[ 0, 0] GTN157 :- zero32 ;
[ 0, 0] GTN159 :- zero64 ;
Then found at fun EBO_Remove_Unused_Ops, for TN152, its
tninfo->redefined_before_block_end is true.
3370 /* Check for indirect and global references. */
3371 if (!tninfo->redefined_before_block_end &&
3372 TN_live_out_of(tn, tninfo->in_bb)) goto op_is_needed;
3373
redefined_before_block_end is set to true because
CGTARG_Gen_Dedicated_Subclass_TN
always returns ISA_REGISTER_SUBCLASS_r11 for TN153 (because the opcode is
"TOP_asm"), which is the same register used for TN152. But in fact the asm's fixed
number of operands/results is 0, so TN153 should not carry register set info.
4041 TN* CGTARG_Gen_Dedicated_Subclass_TN( OP* op, int idx, BOOL
is_result )
4042 {
4043 const ISA_REGISTER_SUBCLASS subclass = is_result ?
4044 OP_result_reg_subclass( op, idx ) : OP_opnd_reg_subclass( op,
idx );
4045 const REGISTER_SET subclass_regs =
REGISTER_SUBCLASS_members(subclass);
4046
4047 if( REGISTER_SET_Size(subclass_regs) != 1 ){
4048 TN* tn = is_result ? OP_result( op, idx ) : OP_opnd( op, idx );
4049 return TN_is_dedicated(tn) ? tn : NULL;
4050 }
4051
4052 const REGISTER reg = REGISTER_SET_Choose(subclass_regs);
4053 const ISA_REGISTER_CLASS rc =
REGISTER_SUBCLASS_register_class(subclass);
4054
4055 return Build_Dedicated_TN( rc, reg, 0 );
4056 }
The fix is to add a check on the idx to make sure it does not overlap any fixed
operand or result.
Thanks
zhuqing
extern __attribute__ ((section (".data.percpu" ""))) __typeof__ (unsigned long) per_cpu__this_cpu_off;
/* Disable hardware interrupts on x86 via the CLI instruction (reduced from
 * the kernel).  The "memory" clobber makes the asm a compiler barrier so the
 * optimizer cannot move memory accesses across the disable point. */
static inline void native_irq_disable (void)
{
asm volatile ("cli":::"memory");
}
/* Thin wrapper over native_irq_disable(); kept as a separate function to
 * mirror the kernel's irq-disable call chain in this reduced test case. */
static inline void raw_local_irq_disable (void)
{
native_irq_disable ();
}
/* Minimal stand-in for the kernel's struct buffer_head, reduced to the three
 * fields read by lookup_bh_lru().  NOTE(review): field order/offsets matter
 * to the generated code being debugged — do not reorder. */
struct buffer_head
{
unsigned long b_blocknr;   /* block number on the device */
unsigned long b_size;      /* block size in bytes */
struct block_device *b_bdev; /* owning block device (opaque here) */
};
/* Empty stub: in the real kernel this takes a reference on the buffer head.
 * The body was removed by test-case reduction and is intentionally a no-op. */
static inline void get_bh (struct buffer_head *bh)
{
}
/* Per-CPU LRU cache of recently used buffer heads; 8 entries, most recently
 * used at index 0 (see the shift loop in lookup_bh_lru). */
struct bh_lru
{
struct buffer_head *bhs[8];
};
/* Per-CPU instance of the LRU cache, placed in the .data.percpu section.
 * Its address (the section base offset) is the value loaded into %rax in the
 * miscompiled code quoted above. */
static
__attribute__ ((section (".data.percpu" ""))) __typeof__ (struct bh_lru)
per_cpu__bh_lrus =
{
};
/* Reduced reproducer for open64 bug 860: look up (bdev, block, size) in the
 * per-CPU buffer-head LRU.  The statement-expression below is the expansion
 * of the kernel's per_cpu() macro; at -O2 the copy of the per-cpu base
 * address computed for it is wrongly deleted by EBO_Remove_Unused_Ops,
 * leaving %r11 undefined at the leaq that rebuilds `lru`.
 *
 * NOTE(review): this is a delta-reduced test case — the function is declared
 * to return a pointer but falls off the end without returning `ret`.  That
 * is undefined behavior if the caller uses the value, but the body must stay
 * exactly as-is to reproduce the codegen bug. */
static struct buffer_head *
lookup_bh_lru (struct block_device *bdev, unsigned long block, unsigned size)
{
struct buffer_head *ret = ((void *) 0);
struct bh_lru *lru;
unsigned int i;
raw_local_irq_disable ();
/* per_cpu(bh_lrus) expansion: take the section-relative address of
 * per_cpu__bh_lrus, then add this CPU's per-cpu offset, which is read from
 * %gs:per_cpu__this_cpu_off by the inner asm. */
lru = &(*(
{
unsigned long __ptr;
/* Empty asm with "0" constraint: launders the symbol address through a
 * register so the compiler treats __ptr as opaque. */
__asm__ ("": "=r" (__ptr):"0" ((&per_cpu__bh_lrus)));
(typeof ((&per_cpu__bh_lrus))) (__ptr +
(((
{
typeof(per_cpu__this_cpu_off) ret__;
/* Reads the gs-based per-cpu offset; this is the asm whose result TN
 * is wrongly given the r11 subclass, clobbering the copy above. */
asm ("mov" "q " "%%" "gs" ":%P" "1" ",%0": "=r" (ret__):"m" (per_cpu__this_cpu_off));
ret__;}
))));}
));
/* Scan the 8-entry LRU; on a hit, shift earlier entries down so the hit
 * moves to slot 0 (most recently used). */
for (i = 0; i < 8; i++)
{
struct buffer_head *bh = lru->bhs[i];  /* line 60: the faulting load */
if (bh && bh->b_bdev == bdev && bh->b_blocknr == block
&& bh->b_size == size)
{
if (i)
{
while (i)
{
lru->bhs[i] = lru->bhs[i - 1];
i--;
}
}
ret = bh;
break;
}
}
}
/* Driver that keeps lookup_bh_lru() live for the reproducer.
 * NOTE(review): also falls off the end without a return — deliberate
 * test-case reduction, not code to be fixed. */
struct buffer_head *
__find_get_block (struct block_device *bdev, unsigned long block, unsigned size)
{
struct buffer_head *bh = lookup_bh_lru (bdev, block, size);
}
bug860.patch
Description: Binary data
------------------------------------------------------------------------------ All the data continuously generated in your IT infrastructure contains a definitive record of customers, application performance, security threats, fraudulent activity, and more. Splunk takes this data and makes sense of it. IT sense. And common sense. http://p.sf.net/sfu/splunk-novd2d
_______________________________________________ Open64-devel mailing list [email protected] https://lists.sourceforge.net/lists/listinfo/open64-devel
