https://gcc.gnu.org/g:c087dd8bb06038248917ec1968a5acfac1d1a88c

commit c087dd8bb06038248917ec1968a5acfac1d1a88c
Author: Owen Avery <powerboat9.ga...@gmail.com>
Date:   Wed May 14 18:22:33 2025 -0400

    nr2.0: Fix some paths in test files

    This is similar to 9faba024ef18b9c4d67f22bd3b020b5e445fad0a,
    but it applies to execute tests.

    gcc/testsuite/ChangeLog:

            * rust/execute/torture/for-loop1.rs: Adjust paths.
            * rust/execute/torture/for-loop2.rs: Likewise.
            * rust/execute/torture/iter1.rs: Likewise.

    Signed-off-by: Owen Avery <powerboat9.ga...@gmail.com>

Diff:
---
 gcc/testsuite/rust/execute/torture/for-loop1.rs | 38 ++++++++++++-------------
 gcc/testsuite/rust/execute/torture/for-loop2.rs | 38 ++++++++++++-------------
 gcc/testsuite/rust/execute/torture/iter1.rs     | 38 ++++++++++++-------------
 3 files changed, 57 insertions(+), 57 deletions(-)

diff --git a/gcc/testsuite/rust/execute/torture/for-loop1.rs b/gcc/testsuite/rust/execute/torture/for-loop1.rs
index 5a6a70c37d64..334218927b13 100644
--- a/gcc/testsuite/rust/execute/torture/for-loop1.rs
+++ b/gcc/testsuite/rust/execute/torture/for-loop1.rs
@@ -102,30 +102,30 @@ mod ptr {
     #[lang = "const_ptr"]
     impl<T> *const T {
         pub unsafe fn offset(self, count: isize) -> *const T {
-            intrinsics::offset(self, count)
+            crate::intrinsics::offset(self, count)
         }
     }

     #[lang = "mut_ptr"]
     impl<T> *mut T {
         pub unsafe fn offset(self, count: isize) -> *mut T {
-            intrinsics::offset(self, count) as *mut T
+            crate::intrinsics::offset(self, count) as *mut T
         }
     }

     pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
         let x = x as *mut u8;
         let y = y as *mut u8;
-        let len = mem::size_of::<T>() * count;
+        let len = crate::mem::size_of::<T>() * count;
         swap_nonoverlapping_bytes(x, y, len)
     }

     pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
         // For types smaller than the block optimization below,
         // just swap directly to avoid pessimizing codegen.
-        if mem::size_of::<T>() < 32 {
+        if crate::mem::size_of::<T>() < 32 {
             let z = read(x);
-            intrinsics::copy_nonoverlapping(y, x, 1);
+            crate::intrinsics::copy_nonoverlapping(y, x, 1);
             write(y, z);
         } else {
             swap_nonoverlapping(x, y, 1);
@@ -133,12 +133,12 @@ mod ptr {
     }

     pub unsafe fn write<T>(dst: *mut T, src: T) {
-        intrinsics::move_val_init(&mut *dst, src)
+        crate::intrinsics::move_val_init(&mut *dst, src)
     }

     pub unsafe fn read<T>(src: *const T) -> T {
-        let mut tmp: T = mem::uninitialized();
-        intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+        let mut tmp: T = crate::mem::uninitialized();
+        crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
         tmp
     }

@@ -146,7 +146,7 @@ mod ptr {
         struct Block(u64, u64, u64, u64);
         struct UnalignedBlock(u64, u64, u64, u64);

-        let block_size = mem::size_of::<Block>();
+        let block_size = crate::mem::size_of::<Block>();

         // Loop through x & y, copying them `Block` at a time
         // The optimizer should unroll the loop fully for most types
@@ -155,31 +155,31 @@ mod ptr {
         while i + block_size <= len {
             // Create some uninitialized memory as scratch space
             // Declaring `t` here avoids aligning the stack when this loop is unused
-            let mut t: Block = mem::uninitialized();
+            let mut t: Block = crate::mem::uninitialized();
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);

             // Swap a block of bytes of x & y, using t as a temporary buffer
             // This should be optimized into efficient SIMD operations where available
-            intrinsics::copy_nonoverlapping(x, t, block_size);
-            intrinsics::copy_nonoverlapping(y, x, block_size);
-            intrinsics::copy_nonoverlapping(t, y, block_size);
+            crate::intrinsics::copy_nonoverlapping(x, t, block_size);
+            crate::intrinsics::copy_nonoverlapping(y, x, block_size);
+            crate::intrinsics::copy_nonoverlapping(t, y, block_size);
             i += block_size;
         }

         if i < len {
             // Swap any remaining bytes
-            let mut t: UnalignedBlock = mem::uninitialized();
+            let mut t: UnalignedBlock = crate::mem::uninitialized();
             let rem = len - i;

             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);

-            intrinsics::copy_nonoverlapping(x, t, rem);
-            intrinsics::copy_nonoverlapping(y, x, rem);
-            intrinsics::copy_nonoverlapping(t, y, rem);
+            crate::intrinsics::copy_nonoverlapping(x, t, rem);
+            crate::intrinsics::copy_nonoverlapping(y, x, rem);
+            crate::intrinsics::copy_nonoverlapping(t, y, rem);
         }
     }
 }
@@ -194,7 +194,7 @@ mod mem {

     pub fn swap<T>(x: &mut T, y: &mut T) {
         unsafe {
-            ptr::swap_nonoverlapping_one(x, y);
+            crate::ptr::swap_nonoverlapping_one(x, y);
         }
     }

@@ -204,7 +204,7 @@ mod mem {
     }

     pub unsafe fn uninitialized<T>() -> T {
-        intrinsics::uninit()
+        crate::intrinsics::uninit()
     }
 }

diff --git a/gcc/testsuite/rust/execute/torture/for-loop2.rs b/gcc/testsuite/rust/execute/torture/for-loop2.rs
index 5ba2cd1351a6..4f5dfe1a0a26 100644
--- a/gcc/testsuite/rust/execute/torture/for-loop2.rs
+++ b/gcc/testsuite/rust/execute/torture/for-loop2.rs
@@ -101,30 +101,30 @@ mod ptr {
     #[lang = "const_ptr"]
     impl<T> *const T {
         pub unsafe fn offset(self, count: isize) -> *const T {
-            intrinsics::offset(self, count)
+            crate::intrinsics::offset(self, count)
         }
     }

     #[lang = "mut_ptr"]
     impl<T> *mut T {
         pub unsafe fn offset(self, count: isize) -> *mut T {
-            intrinsics::offset(self, count) as *mut T
+            crate::intrinsics::offset(self, count) as *mut T
         }
     }

     pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
         let x = x as *mut u8;
         let y = y as *mut u8;
-        let len = mem::size_of::<T>() * count;
+        let len = crate::mem::size_of::<T>() * count;
         swap_nonoverlapping_bytes(x, y, len)
     }

     pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
         // For types smaller than the block optimization below,
         // just swap directly to avoid pessimizing codegen.
-        if mem::size_of::<T>() < 32 {
+        if crate::mem::size_of::<T>() < 32 {
             let z = read(x);
-            intrinsics::copy_nonoverlapping(y, x, 1);
+            crate::intrinsics::copy_nonoverlapping(y, x, 1);
             write(y, z);
         } else {
             swap_nonoverlapping(x, y, 1);
@@ -132,12 +132,12 @@ mod ptr {
     }

     pub unsafe fn write<T>(dst: *mut T, src: T) {
-        intrinsics::move_val_init(&mut *dst, src)
+        crate::intrinsics::move_val_init(&mut *dst, src)
     }

     pub unsafe fn read<T>(src: *const T) -> T {
-        let mut tmp: T = mem::uninitialized();
-        intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+        let mut tmp: T = crate::mem::uninitialized();
+        crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
         tmp
     }

@@ -145,7 +145,7 @@ mod ptr {
         struct Block(u64, u64, u64, u64);
         struct UnalignedBlock(u64, u64, u64, u64);

-        let block_size = mem::size_of::<Block>();
+        let block_size = crate::mem::size_of::<Block>();

         // Loop through x & y, copying them `Block` at a time
         // The optimizer should unroll the loop fully for most types
@@ -154,31 +154,31 @@ mod ptr {
         while i + block_size <= len {
             // Create some uninitialized memory as scratch space
             // Declaring `t` here avoids aligning the stack when this loop is unused
-            let mut t: Block = mem::uninitialized();
+            let mut t: Block = crate::mem::uninitialized();
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);

             // Swap a block of bytes of x & y, using t as a temporary buffer
             // This should be optimized into efficient SIMD operations where available
-            intrinsics::copy_nonoverlapping(x, t, block_size);
-            intrinsics::copy_nonoverlapping(y, x, block_size);
-            intrinsics::copy_nonoverlapping(t, y, block_size);
+            crate::intrinsics::copy_nonoverlapping(x, t, block_size);
+            crate::intrinsics::copy_nonoverlapping(y, x, block_size);
+            crate::intrinsics::copy_nonoverlapping(t, y, block_size);
             i += block_size;
         }

         if i < len {
             // Swap any remaining bytes
-            let mut t: UnalignedBlock = mem::uninitialized();
+            let mut t: UnalignedBlock = crate::mem::uninitialized();
             let rem = len - i;

             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);

-            intrinsics::copy_nonoverlapping(x, t, rem);
-            intrinsics::copy_nonoverlapping(y, x, rem);
-            intrinsics::copy_nonoverlapping(t, y, rem);
+            crate::intrinsics::copy_nonoverlapping(x, t, rem);
+            crate::intrinsics::copy_nonoverlapping(y, x, rem);
+            crate::intrinsics::copy_nonoverlapping(t, y, rem);
         }
     }
 }
@@ -193,7 +193,7 @@ mod mem {

     pub fn swap<T>(x: &mut T, y: &mut T) {
         unsafe {
-            ptr::swap_nonoverlapping_one(x, y);
+            crate::ptr::swap_nonoverlapping_one(x, y);
         }
     }

@@ -203,7 +203,7 @@ mod mem {
     }

     pub unsafe fn uninitialized<T>() -> T {
-        intrinsics::uninit()
+        crate::intrinsics::uninit()
     }
 }

diff --git a/gcc/testsuite/rust/execute/torture/iter1.rs b/gcc/testsuite/rust/execute/torture/iter1.rs
index c3b6c7bc3f89..233eb60383b3 100644
--- a/gcc/testsuite/rust/execute/torture/iter1.rs
+++ b/gcc/testsuite/rust/execute/torture/iter1.rs
@@ -99,30 +99,30 @@ mod ptr {
     #[lang = "const_ptr"]
     impl<T> *const T {
         pub unsafe fn offset(self, count: isize) -> *const T {
-            intrinsics::offset(self, count)
+            crate::intrinsics::offset(self, count)
         }
     }

     #[lang = "mut_ptr"]
     impl<T> *mut T {
         pub unsafe fn offset(self, count: isize) -> *mut T {
-            intrinsics::offset(self, count) as *mut T
+            crate::intrinsics::offset(self, count) as *mut T
         }
     }

     pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
         let x = x as *mut u8;
         let y = y as *mut u8;
-        let len = mem::size_of::<T>() * count;
+        let len = crate::mem::size_of::<T>() * count;
         swap_nonoverlapping_bytes(x, y, len)
     }

     pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
         // For types smaller than the block optimization below,
         // just swap directly to avoid pessimizing codegen.
-        if mem::size_of::<T>() < 32 {
+        if crate::mem::size_of::<T>() < 32 {
             let z = read(x);
-            intrinsics::copy_nonoverlapping(y, x, 1);
+            crate::intrinsics::copy_nonoverlapping(y, x, 1);
             write(y, z);
         } else {
             swap_nonoverlapping(x, y, 1);
@@ -130,12 +130,12 @@ mod ptr {
     }

     pub unsafe fn write<T>(dst: *mut T, src: T) {
-        intrinsics::move_val_init(&mut *dst, src)
+        crate::intrinsics::move_val_init(&mut *dst, src)
     }

     pub unsafe fn read<T>(src: *const T) -> T {
-        let mut tmp: T = mem::uninitialized();
-        intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+        let mut tmp: T = crate::mem::uninitialized();
+        crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
         tmp
     }

@@ -143,7 +143,7 @@ mod ptr {
         struct Block(u64, u64, u64, u64);
         struct UnalignedBlock(u64, u64, u64, u64);

-        let block_size = mem::size_of::<Block>();
+        let block_size = crate::mem::size_of::<Block>();

         // Loop through x & y, copying them `Block` at a time
         // The optimizer should unroll the loop fully for most types
@@ -152,31 +152,31 @@ mod ptr {
         while i + block_size <= len {
             // Create some uninitialized memory as scratch space
             // Declaring `t` here avoids aligning the stack when this loop is unused
-            let mut t: Block = mem::uninitialized();
+            let mut t: Block = crate::mem::uninitialized();
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);

             // Swap a block of bytes of x & y, using t as a temporary buffer
             // This should be optimized into efficient SIMD operations where available
-            intrinsics::copy_nonoverlapping(x, t, block_size);
-            intrinsics::copy_nonoverlapping(y, x, block_size);
-            intrinsics::copy_nonoverlapping(t, y, block_size);
+            crate::intrinsics::copy_nonoverlapping(x, t, block_size);
+            crate::intrinsics::copy_nonoverlapping(y, x, block_size);
+            crate::intrinsics::copy_nonoverlapping(t, y, block_size);
             i += block_size;
         }

         if i < len {
             // Swap any remaining bytes
-            let mut t: UnalignedBlock = mem::uninitialized();
+            let mut t: UnalignedBlock = crate::mem::uninitialized();
             let rem = len - i;

             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);

-            intrinsics::copy_nonoverlapping(x, t, rem);
-            intrinsics::copy_nonoverlapping(y, x, rem);
-            intrinsics::copy_nonoverlapping(t, y, rem);
+            crate::intrinsics::copy_nonoverlapping(x, t, rem);
+            crate::intrinsics::copy_nonoverlapping(y, x, rem);
+            crate::intrinsics::copy_nonoverlapping(t, y, rem);
         }
     }
 }
@@ -191,7 +191,7 @@ mod mem {

     pub fn swap<T>(x: &mut T, y: &mut T) {
         unsafe {
-            ptr::swap_nonoverlapping_one(x, y);
+            crate::ptr::swap_nonoverlapping_one(x, y);
         }
     }

@@ -201,7 +201,7 @@ mod mem {
     }

     pub unsafe fn uninitialized<T>() -> T {
-        intrinsics::uninit()
+        crate::intrinsics::uninit()
     }
 }
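
For context on why the tests needed these adjustments: under the nr2.0 name
resolver, a plain path such as `intrinsics::offset` is looked up relative to
the module containing it (matching how rustc resolves module-relative paths),
so inside `mod ptr` it no longer finds the top-level `intrinsics` module, and
the path must be rooted at the crate. Below is a minimal, self-contained
sketch of the pattern; the modules and function bodies are hypothetical
stand-ins for the test files' real intrinsics, not code from the commit:

    // Stand-in for the test files' top-level `intrinsics` module.
    mod intrinsics {
        pub fn offset(x: usize, count: usize) -> usize {
            x + count
        }
    }

    mod ptr {
        pub fn next(x: usize) -> usize {
            // `intrinsics::offset(x, 1)` would fail to resolve here: the
            // path is looked up relative to `ptr`, which has no `intrinsics`
            // submodule. Rooting the path at the crate, as this commit does
            // throughout the tests, makes the lookup succeed.
            crate::intrinsics::offset(x, 1)
        }
    }

    fn main() {
        let v = ptr::next(41); // v == 42
    }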