Andreas Sandberg has uploaded this change for review. ( https://gem5-review.googlesource.com/c/public/gem5/+/39475 )

Change subject: sim, mem, dev, arch: Consistently use ISO prefixes
......................................................................

sim, mem, dev, arch: Consistently use ISO prefixes

We currently use the ambiguous JEDEC prefixes (e.g., MB) in a lot of
places instead of the unambiguous ISO/IEC prefixes (e.g., MiB). This
change replaces most of the old prefixes with ISO/IEC prefixes
throughout the code base.

Change-Id: I0849b97d75e17fca2c782166185f41dd2cf6b0a5
Signed-off-by: Andreas Sandberg <andreas.sandb...@arm.com>
---
M src/arch/arm/ArmSemihosting.py
M src/arch/arm/table_walker.cc
M src/arch/arm/table_walker.hh
M src/arch/mips/process.cc
M src/arch/x86/pagetable_walker.cc
M src/dev/arm/FlashDevice.py
M src/dev/arm/RealView.py
M src/dev/net/Ethernet.py
M src/dev/pci/CopyEngine.py
M src/dev/x86/Pc.py
M src/gpu-compute/GPU.py
M src/gpu-compute/LdsState.py
M src/learning_gem5/part2/HelloObject.py
M src/learning_gem5/part2/SimpleCache.py
M src/mem/AbstractMemory.py
M src/mem/DRAMInterface.py
M src/mem/NVMInterface.py
M src/mem/SimpleMemory.py
M src/mem/XBar.py
M src/mem/cache/prefetch/Prefetcher.py
M src/mem/cache/tags/Tags.py
M src/python/m5/params.py
M src/sim/Process.py
M src/sim/syscall_emul.hh
24 files changed, 138 insertions(+), 136 deletions(-)
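
For context, the ambiguity is purely one of interpretation: in JEDEC usage
"MB" means 2^20 bytes, under SI rules it means 10^6 bytes, while "MiB" always
means 2^20 bytes. A minimal standalone Python sketch (illustration only, not
part of the patch) of the resulting discrepancy:

    # Illustration only: the two readings of "MB" vs the unambiguous "MiB".
    JEDEC_MB = 2 ** 20   # binary reading of "MB" (the sense these configs use)
    SI_MB = 10 ** 6      # decimal SI reading of the same prefix
    IEC_MIB = 2 ** 20    # "MiB" has only one meaning

    for label, unit in (("512MB (JEDEC)", JEDEC_MB),
                        ("512MB (SI)", SI_MB),
                        ("512MiB (IEC)", IEC_MIB)):
        print(f"{label:14s} = {512 * unit:>12,d} bytes")

    # 512 * 2**20 = 536,870,912 bytes vs 512 * 10**6 = 512,000,000 bytes,
    # i.e. the two readings of "MB" differ by roughly 5%.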



diff --git a/src/arch/arm/ArmSemihosting.py b/src/arch/arm/ArmSemihosting.py
index e445590..8674edc 100644
--- a/src/arch/arm/ArmSemihosting.py
+++ b/src/arch/arm/ArmSemihosting.py
@@ -53,10 +53,10 @@
     files_root_dir = Param.String("",
         "Host root directory for files handled by Semihosting")

-    mem_reserve = Param.MemorySize("32MB",
+    mem_reserve = Param.MemorySize("32MiB",
"Amount of memory to reserve at the start of the address map. This "
         "memory won't be used by the heap reported to an application.");
-    stack_size = Param.MemorySize("32MB", "Application stack size");
+    stack_size = Param.MemorySize("32MiB", "Application stack size");

     time = Param.Time('01/01/2009',
                       "System time to use ('Now' for actual time)")
diff --git a/src/arch/arm/table_walker.cc b/src/arch/arm/table_walker.cc
index e658b02..7f19adb 100644
--- a/src/arch/arm/table_walker.cc
+++ b/src/arch/arm/table_walker.cc
@@ -648,7 +648,7 @@
                 MISCREG_TTBR0, currState->tc, !currState->isSecure));
             tsz = currState->ttbcr.t0sz;
             currState->isUncacheable = currState->ttbcr.irgn0 == 0;
-            if (ttbr0_max < (1ULL << 30))  // Upper limit < 1 GB
+            if (ttbr0_max < (1ULL << 30))  // Upper limit < 1 GiB
                 start_lookup_level = L2;
         } else if (currState->vaddr >= ttbr1_min) {
             DPRINTF(TLB, " - Selecting TTBR1 (long-desc.)\n");
@@ -673,7 +673,7 @@
                 MISCREG_TTBR1, currState->tc, !currState->isSecure));
             tsz = currState->ttbcr.t1sz;
             currState->isUncacheable = currState->ttbcr.irgn1 == 0;
-            // Lower limit >= 3 GB
+            // Lower limit >= 3 GiB
             if (ttbr1_min >= (1ULL << 31) + (1ULL << 30))
                 start_lookup_level = L2;
         } else {
@@ -2379,16 +2379,16 @@
     pageSizes // see DDI 0487A D4-1661
         .init(10)
         .flags(Stats::total | Stats::pdf | Stats::dist | Stats::nozero);
-    pageSizes.subname(0, "4K");
-    pageSizes.subname(1, "16K");
-    pageSizes.subname(2, "64K");
-    pageSizes.subname(3, "1M");
-    pageSizes.subname(4, "2M");
-    pageSizes.subname(5, "16M");
-    pageSizes.subname(6, "32M");
-    pageSizes.subname(7, "512M");
-    pageSizes.subname(8, "1G");
-    pageSizes.subname(9, "4TB");
+    pageSizes.subname(0, "4KiB");
+    pageSizes.subname(1, "16KiB");
+    pageSizes.subname(2, "64KiB");
+    pageSizes.subname(3, "1MiB");
+    pageSizes.subname(4, "2MiB");
+    pageSizes.subname(5, "16MiB");
+    pageSizes.subname(6, "32MiB");
+    pageSizes.subname(7, "512MiB");
+    pageSizes.subname(8, "1GiB");
+    pageSizes.subname(9, "4TiB");

     requestOrigin
         .init(2,2) // Instruction/Data, requests/completed
diff --git a/src/arch/arm/table_walker.hh b/src/arch/arm/table_walker.hh
index dbb480e..f4ee552 100644
--- a/src/arch/arm/table_walker.hh
+++ b/src/arch/arm/table_walker.hh
@@ -132,7 +132,7 @@
             return (EntryType)(data & 0x3);
         }

-        /** Is the page a Supersection (16MB)?*/
+        /** Is the page a Supersection (16 MiB)?*/
         bool supersection() const
         {
             return bits(data, 18);
@@ -434,8 +434,8 @@
         {
             switch (bits(data, 1, 0)) {
               case 0x1:
-                // In AArch64 blocks are not allowed at L0 for the 4 KB granule
-                // and at L1 for 16/64 KB granules
+                // In AArch64 blocks are not allowed at L0 for the
+                // 4 KiB granule and at L1 for 16/64 KiB granules
                 switch (grainSize) {
                   case Grain4KB:
                     if (lookupLevel == L0 || lookupLevel == L3)
@@ -451,7 +451,7 @@

                   case Grain64KB:
                     // With Armv8.2-LPA (52bit PA) L1 Block descriptors
-                    // are allowed for 64KB granule
+                    // are allowed for 64KiB granule
                     if ((lookupLevel == L1 && physAddrRange == 52) ||
                         lookupLevel == L2)
                         return Block;
@@ -474,13 +474,13 @@
             if (type() == Block) {
                 switch (grainSize) {
                     case Grain4KB:
-                        return lookupLevel == L1 ? 30 /* 1 GB */
-                                                 : 21 /* 2 MB */;
+                        return lookupLevel == L1 ? 30 /* 1 GiB */
+                                                 : 21 /* 2 MiB */;
                     case Grain16KB:
-                        return 25  /* 32 MB */;
+                        return 25  /* 32 MiB */;
                     case Grain64KB:
-                        return lookupLevel == L1 ? 42 /* 4TB MB */
-                                                 : 29 /* 512 MB */;
+                        return lookupLevel == L1 ? 42 /* 4 TiB */
+                                                 : 29 /* 512 MiB */;
                     default:
                         panic("Invalid AArch64 VM granule size\n");
                 }
diff --git a/src/arch/mips/process.cc b/src/arch/mips/process.cc
index 44f4f32..1cf18e2 100644
--- a/src/arch/mips/process.cc
+++ b/src/arch/mips/process.cc
@@ -65,7 +65,7 @@
     Addr brk_point = image.maxAddr();
     brk_point = roundUp(brk_point, PageBytes);

-    // Set up region for mmaps.  Start it 1GB above the top of the heap.
+    // Set up region for mmaps.  Start it 1GiB above the top of the heap.
     Addr mmap_end = brk_point + 0x40000000L;

     memState = make_shared<MemState>(this, brk_point, stack_base,
diff --git a/src/arch/x86/pagetable_walker.cc b/src/arch/x86/pagetable_walker.cc
index 4b7b88c..fa4a049 100644
--- a/src/arch/x86/pagetable_walker.cc
+++ b/src/arch/x86/pagetable_walker.cc
@@ -338,14 +338,14 @@
             break;
         }
         if (!pte.ps) {
-            // 4 KB page
+            // 4 KiB page
             entry.logBytes = 12;
             nextRead =
((uint64_t)pte & (mask(40) << 12)) + vaddr.longl1 * dataSize;
             nextState = LongPTE;
             break;
         } else {
-            // 2 MB page
+            // 2 MiB page
             entry.logBytes = 21;
             entry.paddr = (uint64_t)pte & (mask(31) << 21);
             entry.uncacheable = uncacheable;
@@ -400,13 +400,13 @@
             break;
         }
         if (!pte.ps) {
-            // 4 KB page
+            // 4 KiB page
             entry.logBytes = 12;
nextRead = ((uint64_t)pte & (mask(40) << 12)) + vaddr.pael1 * dataSize;
             nextState = PAEPTE;
             break;
         } else {
-            // 2 MB page
+            // 2 MiB page
             entry.logBytes = 21;
             entry.paddr = (uint64_t)pte & (mask(31) << 21);
             entry.uncacheable = uncacheable;
@@ -450,14 +450,14 @@
             break;
         }
         if (!pte.ps) {
-            // 4 KB page
+            // 4 KiB page
             entry.logBytes = 12;
             nextRead =
((uint64_t)pte & (mask(20) << 12)) + vaddr.norml2 * dataSize;
             nextState = PTE;
             break;
         } else {
-            // 4 MB page
+            // 4 MiB page
             entry.logBytes = 21;
             entry.paddr = bits(pte, 20, 13) << 32 | bits(pte, 31, 22) << 22;
             entry.uncacheable = uncacheable;
@@ -480,7 +480,7 @@
             fault = pageFault(pte.p);
             break;
         }
-        // 4 KB page
+        // 4 KiB page
         entry.logBytes = 12;
nextRead = ((uint64_t)pte & (mask(20) << 12)) + vaddr.norml2 * dataSize;
         nextState = PTE;
diff --git a/src/dev/arm/FlashDevice.py b/src/dev/arm/FlashDevice.py
index ebaabc3..6455bbf 100644
--- a/src/dev/arm/FlashDevice.py
+++ b/src/dev/arm/FlashDevice.py
@@ -47,12 +47,12 @@
 class FlashDevice(AbstractNVM):
     type = 'FlashDevice'
     cxx_header = "dev/arm/flash_device.hh"
-    # default blocksize is 128 kB.This seems to be the most common size in
+    # default blocksize is 128 KiB.This seems to be the most common size in
     # mobile devices (not the image blocksize)
-    blk_size = Param.MemorySize("128kB", "Size of one disk block")
-    # disk page size is 2 kB. This is the most commonly used page size in
+    blk_size = Param.MemorySize("128KiB", "Size of one disk block")
+    # disk page size is 2 KiB. This is the most commonly used page size in
     # flash devices
-    page_size = Param.MemorySize("2kB", "Size of one disk page")
+    page_size = Param.MemorySize("2KiB", "Size of one disk page")
     # There are many GC flavors. It is impossible to cover them all; this
     # parameter enables the approximation of different GC algorithms
     GC_active = Param.Percent(50, "Percentage of the time (in whole numbers) \
diff --git a/src/dev/arm/RealView.py b/src/dev/arm/RealView.py
index 8fa0edd..fee76a5 100644
--- a/src/dev/arm/RealView.py
+++ b/src/dev/arm/RealView.py
@@ -510,7 +510,7 @@
     frame_format = Param.ImageFormat("Auto",
                                      "image format of the captured frame")

-    pixel_buffer_size = Param.MemorySize32("2kB", "Size of address range")
+    pixel_buffer_size = Param.MemorySize32("2KiB", "Size of address range")

     pxl_clk = Param.ClockDomain("Pixel clock source")
     pixel_chunk = Param.Unsigned(32, "Number of pixels to handle in one batch")
@@ -645,7 +645,7 @@
     type = 'RealView'
     cxx_header = "dev/arm/realview.hh"
     system = Param.System(Parent.any, "system")
-    _mem_regions = [ AddrRange(0, size='256MB') ]
+    _mem_regions = [ AddrRange(0, size='256MiB') ]
     _num_pci_dev = 0

     def _on_chip_devices(self):
@@ -742,15 +742,15 @@
                         state.addrCells(system.workload.cpu_release_addr)))

 class VExpress_EMM(RealView):
-    _mem_regions = [ AddrRange('2GB', size='2GB') ]
+    _mem_regions = [ AddrRange('2GiB', size='2GiB') ]

     # Ranges based on excluding what is part of on-chip I/O (gic,
     # a9scu)
-    _off_chip_ranges = [AddrRange(0x2F000000, size='16MB'),
-                        AddrRange(0x30000000, size='256MB'),
-                        AddrRange(0x40000000, size='512MB'),
-                        AddrRange(0x18000000, size='64MB'),
-                        AddrRange(0x1C000000, size='64MB')]
+    _off_chip_ranges = [AddrRange(0x2F000000, size='16MiB'),
+                        AddrRange(0x30000000, size='256MiB'),
+                        AddrRange(0x40000000, size='512MiB'),
+                        AddrRange(0x18000000, size='64MiB'),
+                        AddrRange(0x1C000000, size='64MiB')]

     # Platform control device (off-chip)
     realview_io = RealViewCtrl(proc_id0=0x14000000, proc_id1=0x14000000,
@@ -790,7 +790,7 @@
     ### Off-chip devices ###
     uart = Pl011(pio_addr=0x1c090000, interrupt=ArmSPI(num=37))
     pci_host = GenericPciHost(
-        conf_base=0x30000000, conf_size='256MB', conf_device_bits=16,
+        conf_base=0x30000000, conf_size='256MiB', conf_device_bits=16,
         pci_pio_base=0)

     sys_counter = SystemCounter()
@@ -814,9 +814,9 @@
     cf_ctrl.BAR0 = PciLegacyIoBar(addr='0x1C1A0000', size='256B')
     cf_ctrl.BAR1 = PciLegacyIoBar(addr='0x1C1A0100', size='4096B')

-    bootmem        = SimpleMemory(range = AddrRange('64MB'),
+    bootmem        = SimpleMemory(range = AddrRange('64MiB'),
                                   conf_table_reported = False)
-    vram           = SimpleMemory(range = AddrRange(0x18000000, size='32MB'),
+    vram           = SimpleMemory(range = AddrRange(0x18000000, size='32MiB'),
                                   conf_table_reported = False)
     rtc            = PL031(pio_addr=0x1C170000, interrupt=ArmSPI(num=36))

@@ -884,12 +884,12 @@
                 cur_sys, boot_loader, 0x8000000, 0x80000000)

 class VExpress_EMM64(VExpress_EMM):
-    # Three memory regions are specified totalling 512GB
-    _mem_regions = [ AddrRange('2GB', size='2GB'),
-                     AddrRange('34GB', size='30GB'),
-                     AddrRange('512GB', size='480GB') ]
+    # Three memory regions are specified totalling 512GiB
+    _mem_regions = [ AddrRange('2GiB', size='2GiB'),
+                     AddrRange('34GiB', size='30GiB'),
+                     AddrRange('512GiB', size='480GiB') ]
     pci_host = GenericPciHost(
-        conf_base=0x30000000, conf_size='256MB', conf_device_bits=12,
+        conf_base=0x30000000, conf_size='256MiB', conf_device_bits=12,
         pci_pio_base=0x2f000000)

     def setupBootLoader(self, cur_sys, loc, boot_loader=None):
@@ -1038,7 +1038,7 @@
     """

     # Everything above 2GiB is memory
-    _mem_regions = [ AddrRange('2GB', size='510GB') ]
+    _mem_regions = [ AddrRange('2GiB', size='510GiB') ]

     _off_chip_ranges = [
         # CS1-CS5
@@ -1047,15 +1047,15 @@
         AddrRange(0x2f000000, 0x80000000),
     ]

-    bootmem = SimpleMemory(range=AddrRange(0, size='64MB'),
+    bootmem = SimpleMemory(range=AddrRange(0, size='64MiB'),
                            conf_table_reported=False)

     # NOR flash, flash0
-    flash0 = SimpleMemory(range=AddrRange(0x08000000, size='64MB'),
+    flash0 = SimpleMemory(range=AddrRange(0x08000000, size='64MiB'),
                           conf_table_reported=False)

     # Trusted SRAM
-    trusted_sram = SimpleMemory(range=AddrRange(0x04000000, size='256kB'),
+    trusted_sram = SimpleMemory(range=AddrRange(0x04000000, size='256KiB'),
                                 conf_table_reported=False)

     # Non-Trusted SRAM
@@ -1134,7 +1134,7 @@

     ### gem5-specific off-chip devices ###
     pci_host = GenericArmPciHost(
-        conf_base=0x30000000, conf_size='256MB', conf_device_bits=12,
+        conf_base=0x30000000, conf_size='256MiB', conf_device_bits=12,
         pci_pio_base=0x2f000000,
         pci_mem_base=0x40000000,
         int_policy="ARM_PCI_INT_DEV", int_base=100, int_count=4)
@@ -1354,7 +1354,7 @@
                 its=NULL)

     pci_host = GenericArmPciHost(
-        conf_base=0x40000000, conf_size='256MB', conf_device_bits=12,
+        conf_base=0x40000000, conf_size='256MiB', conf_device_bits=12,
         pci_pio_base=0x50000000,
         pci_mem_base=0x400000000,
         int_policy="ARM_PCI_INT_DEV", int_base=100, int_count=4)
diff --git a/src/dev/net/Ethernet.py b/src/dev/net/Ethernet.py
index dd878e2..e5c5562 100644
--- a/src/dev/net/Ethernet.py
+++ b/src/dev/net/Ethernet.py
@@ -92,10 +92,11 @@
     type = 'EtherSwitch'
     cxx_header = "dev/net/etherswitch.hh"
     dump = Param.EtherDump(NULL, "dump object")
-    fabric_speed = Param.NetworkBandwidth('10Gbps', "switch fabric speed in bits "
-                                          "per second")
+    fabric_speed = Param.NetworkBandwidth('10Gbps', "switch fabric speed in "
+                                          "bits per second")
     interface = VectorEtherInt("Ethernet Interface")
-    output_buffer_size = Param.MemorySize('1MB', "size of output port buffers")
+    output_buffer_size = Param.MemorySize('1MiB',
+                                          "size of output port buffers")
     delay = Param.Latency('0us', "packet transmit delay")
     delay_var = Param.Latency('0ns', "packet transmit delay variability")
     time_to_live = Param.Latency('10ms', "time to live of MAC address maping")
@@ -139,8 +140,8 @@
     cxx_header = "dev/net/i8254xGBe.hh"
     hardware_address = Param.EthernetAddr(NextEthernetAddr,
         "Ethernet Hardware Address")
-    rx_fifo_size = Param.MemorySize('384kB', "Size of the rx FIFO")
-    tx_fifo_size = Param.MemorySize('384kB', "Size of the tx FIFO")
+    rx_fifo_size = Param.MemorySize('384KiB', "Size of the rx FIFO")
+    tx_fifo_size = Param.MemorySize('384KiB', "Size of the tx FIFO")
     rx_desc_cache_size = Param.Int(64,
         "Number of enteries in the rx descriptor cache")
     tx_desc_cache_size = Param.Int(64,
@@ -152,7 +153,7 @@
     SubClassCode = 0x00
     ClassCode = 0x02
     ProgIF = 0x00
-    BAR0 = PciMemBar(size='128kB')
+    BAR0 = PciMemBar(size='128KiB')
     MaximumLatency = 0x00
     MinimumGrant = 0xff
     InterruptLine = 0x1e
@@ -195,8 +196,8 @@

     rx_delay = Param.Latency('1us', "Receive Delay")
     tx_delay = Param.Latency('1us', "Transmit Delay")
-    rx_fifo_size = Param.MemorySize('512kB', "max size of rx fifo")
-    tx_fifo_size = Param.MemorySize('512kB', "max size of tx fifo")
+    rx_fifo_size = Param.MemorySize('512KiB', "max size of rx fifo")
+    tx_fifo_size = Param.MemorySize('512KiB', "max size of tx fifo")

     rx_filter = Param.Bool(True, "Enable Receive Filter")
     intr_delay = Param.Latency('10us', "Interrupt propagation delay")
@@ -218,7 +219,7 @@
     SubClassCode = 0x00
     ClassCode = 0x02
     ProgIF = 0x00
-    BARs = (PciIoBar(size='256B'), PciMemBar(size='4kB'))
+    BARs = (PciIoBar(size='256B'), PciMemBar(size='4KiB'))
     MaximumLatency = 0x34
     MinimumGrant = 0xb0
     InterruptLine = 0x1e
@@ -232,12 +233,12 @@
     cxx_header = "dev/net/sinic.hh"

     rx_max_copy = Param.MemorySize('1514B', "rx max copy")
-    tx_max_copy = Param.MemorySize('16kB', "tx max copy")
+    tx_max_copy = Param.MemorySize('16KiB', "tx max copy")
     rx_max_intr = Param.UInt32(10, "max rx packets per interrupt")
-    rx_fifo_threshold = Param.MemorySize('384kB', "rx fifo high threshold")
-    rx_fifo_low_mark = Param.MemorySize('128kB', "rx fifo low threshold")
-    tx_fifo_high_mark = Param.MemorySize('384kB', "tx fifo high threshold")
-    tx_fifo_threshold = Param.MemorySize('128kB', "tx fifo low threshold")
+    rx_fifo_threshold = Param.MemorySize('384KiB', "rx fifo high threshold")
+    rx_fifo_low_mark = Param.MemorySize('128KiB', "rx fifo low threshold")
+    tx_fifo_high_mark = Param.MemorySize('384KiB', "tx fifo high threshold")
+    tx_fifo_threshold = Param.MemorySize('128KiB', "tx fifo low threshold")
     virtual_count = Param.UInt32(1, "Virtualized SINIC")
     zero_copy_size = Param.UInt32(64, "Bytes to copy if below threshold")
     zero_copy_threshold = Param.UInt32(256,
@@ -252,7 +253,7 @@
     SubClassCode = 0x00
     ClassCode = 0x02
     ProgIF = 0x00
-    BARs = PciMemBar(size='64kB')
+    BARs = PciMemBar(size='64KiB')
     MaximumLatency = 0x34
     MinimumGrant = 0xb0
     InterruptLine = 0x1e
diff --git a/src/dev/pci/CopyEngine.py b/src/dev/pci/CopyEngine.py
index f5a0f9e..62d9bd7 100644
--- a/src/dev/pci/CopyEngine.py
+++ b/src/dev/pci/CopyEngine.py
@@ -48,10 +48,10 @@
     InterruptLine = 0x20
     InterruptPin = 0x01

-    BAR0 = PciMemBar(size='1kB')
+    BAR0 = PciMemBar(size='1KiB')

     ChanCnt = Param.UInt8(4, "Number of DMA channels that exist on device")
-    XferCap = Param.MemorySize('4kB',
+    XferCap = Param.MemorySize('4KiB',
             "Number of bits of transfer size that are supported")

     latBeforeBegin = Param.Latency('20ns',
diff --git a/src/dev/x86/Pc.py b/src/dev/x86/Pc.py
index 0ed2648..736f068 100644
--- a/src/dev/x86/Pc.py
+++ b/src/dev/x86/Pc.py
@@ -41,7 +41,7 @@

 class PcPciHost(GenericPciHost):
     conf_base = 0xC000000000000000
-    conf_size = "16MB"
+    conf_size = "16MiB"

     pci_pio_base = 0x8000000000000000

@@ -70,7 +70,7 @@
     default_bus = IOXBar()

     # A device to handle accesses to unclaimed IO ports.
-    empty_isa = IsaFake(pio_addr=x86IOAddress(0), pio_size='64kB',
+    empty_isa = IsaFake(pio_addr=x86IOAddress(0), pio_size='64KiB',
                         ret_data8=0, ret_data16=0, ret_data32=0, ret_data64=0,
                         pio=default_bus.mem_side_ports)

diff --git a/src/gpu-compute/GPU.py b/src/gpu-compute/GPU.py
index d2959ac..929299b 100644
--- a/src/gpu-compute/GPU.py
+++ b/src/gpu-compute/GPU.py
@@ -224,7 +224,7 @@
                                          ruby at kernel launch""")
     impl_kern_end_rel = Param.Bool(False, """Insert rel packet into
                                          ruby at kernel end""")
-    globalmem = Param.MemorySize('64kB', 'Memory size')
+    globalmem = Param.MemorySize('64KiB', 'Memory size')
     timing = Param.Bool(False, 'timing memory accesses')

     cpu_pointer = Param.BaseCPU(NULL, "pointer to base CPU")
diff --git a/src/gpu-compute/LdsState.py b/src/gpu-compute/LdsState.py
index 6bd0a7e..d49338c 100644
--- a/src/gpu-compute/LdsState.py
+++ b/src/gpu-compute/LdsState.py
@@ -40,7 +40,7 @@
     cxx_class = 'LdsState'
     cxx_header = 'gpu-compute/lds_state.hh'
     size = Param.Int(65536, 'the size of the LDS')
-    range = Param.AddrRange('64kB', "address space of the LDS")
+    range = Param.AddrRange('64KiB', "address space of the LDS")
     bankConflictPenalty = Param.Int(1, 'penalty per LDS bank conflict when '\
                                     'accessing data')
     banks = Param.Int(32, 'Number of LDS banks')
diff --git a/src/learning_gem5/part2/HelloObject.py b/src/learning_gem5/part2/HelloObject.py
index 91b4125..00f6f48 100644
--- a/src/learning_gem5/part2/HelloObject.py
+++ b/src/learning_gem5/part2/HelloObject.py
@@ -42,7 +42,7 @@
     type = 'GoodbyeObject'
     cxx_header = "learning_gem5/part2/goodbye_object.hh"

-    buffer_size = Param.MemorySize('1kB',
+    buffer_size = Param.MemorySize('1KiB',
                                    "Size of buffer to fill with goodbye")
-    write_bandwidth = Param.MemoryBandwidth('100MB/s', "Bandwidth to fill "
+    write_bandwidth = Param.MemoryBandwidth('100MiB/s', "Bandwidth to fill "
                                             "the buffer")
diff --git a/src/learning_gem5/part2/SimpleCache.py b/src/learning_gem5/part2/SimpleCache.py
index ad94b50..1780e73 100644
--- a/src/learning_gem5/part2/SimpleCache.py
+++ b/src/learning_gem5/part2/SimpleCache.py
@@ -40,6 +40,6 @@

     latency = Param.Cycles(1, "Cycles taken on a hit or to resolve a miss")

-    size = Param.MemorySize('16kB', "The size of the cache")
+    size = Param.MemorySize('16KiB', "The size of the cache")

     system = Param.System(Parent.any, "The system this cache is part of")
diff --git a/src/mem/AbstractMemory.py b/src/mem/AbstractMemory.py
index 4c21d52..e1941c3 100644
--- a/src/mem/AbstractMemory.py
+++ b/src/mem/AbstractMemory.py
@@ -44,9 +44,10 @@
     abstract = True
     cxx_header = "mem/abstract_mem.hh"

-    # A default memory size of 128 MB (starting at 0) is used to
+    # A default memory size of 128 MiB (starting at 0) is used to
     # simplify the regressions
-    range = Param.AddrRange('128MB', "Address range (potentially interleaved)")
+    range = Param.AddrRange('128MiB',
+                            "Address range (potentially interleaved)")
     null = Param.Bool(False, "Do not store data, always return zero")

     # All memories are passed to the global physical memory, and
diff --git a/src/mem/DRAMInterface.py b/src/mem/DRAMInterface.py
index 85a6092..4f59498 100644
--- a/src/mem/DRAMInterface.py
+++ b/src/mem/DRAMInterface.py
@@ -259,7 +259,7 @@
 # an 8x8 configuration.
 class DDR3_1600_8x8(DRAMInterface):
     # size of device in bytes
-    device_size = '512MB'
+    device_size = '512MiB'

     # 8x8 configuration, 8 devices each with an 8-bit interface
     device_bus_width = 8
@@ -268,7 +268,7 @@
     burst_length = 8

     # Each device has a page (row buffer) size of 1 Kbyte (1K columns x8)
-    device_rowbuffer_size = '1kB'
+    device_rowbuffer_size = '1KiB'

     # 8x8 configuration, so 8 devices
     devices_per_rank = 8
@@ -338,7 +338,7 @@
# [2] High performance AXI-4.0 based interconnect for extensible smart memory
 # cubes (E. Azarkhish et. al)
 # Assumed for the HMC model is a 30 nm technology node.
-# The modelled HMC consists of 4 Gbit layers which sum up to 2GB of memory (4
+# The modelled HMC consists of 4 Gbit layers which sum up to 2GiB of memory (4
 # layers).
 # Each layer has 16 vaults and each vault consists of 2 banks per layer.
# In order to be able to use the same controller used for 2D DRAM generations
@@ -354,8 +354,8 @@
 # of the HMC
 class HMC_2500_1x32(DDR3_1600_8x8):
     # size of device
-    # two banks per device with each bank 4MB [2]
-    device_size = '8MB'
+    # two banks per device with each bank 4MiB [2]
+    device_size = '8MiB'

     # 1x32 configuration, 1 device with 32 TSVs [2]
     device_bus_width = 32
@@ -458,11 +458,11 @@
 # A single DDR4-2400 x64 channel (one command and address bus), with
 # timings based on a DDR4-2400 8 Gbit datasheet (Micron MT40A2G4)
 # in an 16x4 configuration.
-# Total channel capacity is 32GB
-# 16 devices/rank * 2 ranks/channel * 1GB/device = 32GB/channel
+# Total channel capacity is 32GiB
+# 16 devices/rank * 2 ranks/channel * 1GiB/device = 32GiB/channel
 class DDR4_2400_16x4(DRAMInterface):
     # size of device
-    device_size = '1GB'
+    device_size = '1GiB'

     # 16x4 configuration, 16 devices each with a 4-bit interface
     device_bus_width = 4
@@ -569,14 +569,14 @@
 # A single DDR4-2400 x64 channel (one command and address bus), with
 # timings based on a DDR4-2400 8 Gbit datasheet (Micron MT40A1G8)
 # in an 8x8 configuration.
-# Total channel capacity is 16GB
-# 8 devices/rank * 2 ranks/channel * 1GB/device = 16GB/channel
+# Total channel capacity is 16GiB
+# 8 devices/rank * 2 ranks/channel * 1GiB/device = 16GiB/channel
 class DDR4_2400_8x8(DDR4_2400_16x4):
     # 8x8 configuration, 8 devices each with an 8-bit interface
     device_bus_width = 8

     # Each device has a page (row buffer) size of 1 Kbyte (1K columns x8)
-    device_rowbuffer_size = '1kB'
+    device_rowbuffer_size = '1KiB'

     # 8x8 configuration, so 8 devices
     devices_per_rank = 8
@@ -596,14 +596,14 @@
 # A single DDR4-2400 x64 channel (one command and address bus), with
 # timings based on a DDR4-2400 8 Gbit datasheet (Micron MT40A512M16)
 # in an 4x16 configuration.
-# Total channel capacity is 4GB
-# 4 devices/rank * 1 ranks/channel * 1GB/device = 4GB/channel
+# Total channel capacity is 4GiB
+# 4 devices/rank * 1 ranks/channel * 1GiB/device = 4GiB/channel
 class DDR4_2400_4x16(DDR4_2400_16x4):
     # 4x16 configuration, 4 devices each with an 16-bit interface
     device_bus_width = 16

     # Each device has a page (row buffer) size of 2 Kbyte (1K columns x16)
-    device_rowbuffer_size = '2kB'
+    device_rowbuffer_size = '2KiB'

     # 4x16 configuration, so 4 devices
     devices_per_rank = 4
@@ -646,7 +646,7 @@
     dll = False

     # size of device
-    device_size = '512MB'
+    device_size = '512MiB'

     # 1x32 configuration, 1 device with a 32-bit interface
     device_bus_width = 32
@@ -656,7 +656,7 @@

     # Each device has a page (row buffer) size of 1KB
     # (this depends on the memory density)
-    device_rowbuffer_size = '1kB'
+    device_rowbuffer_size = '1KiB'

     # 1x32 configuration, so 1 device
     devices_per_rank = 1
@@ -745,7 +745,7 @@
     dll = False

     # size of device
-    device_size = '1024MB'
+    device_size = '1024MiB'

     # 1x128 configuration, 1 device with a 128-bit interface
     device_bus_width = 128
@@ -755,7 +755,7 @@

     # Each device has a page (row buffer) size of 4KB
     # (this depends on the memory density)
-    device_rowbuffer_size = '4kB'
+    device_rowbuffer_size = '4KiB'

     # 1x128 configuration, so 1 device
     devices_per_rank = 1
@@ -814,7 +814,7 @@
     dll = False

     # size of device
-    device_size = '512MB'
+    device_size = '512MiB'

     # 1x32 configuration, 1 device with a 32-bit interface
     device_bus_width = 32
@@ -823,7 +823,7 @@
     burst_length = 8

     # Each device has a page (row buffer) size of 4KB
-    device_rowbuffer_size = '4kB'
+    device_rowbuffer_size = '4KiB'

     # 1x32 configuration, so 1 device
     devices_per_rank = 1
@@ -911,7 +911,7 @@
 # H5GQ1H24AFR) in a 2x32 configuration.
 class GDDR5_4000_2x32(DRAMInterface):
     # size of device
-    device_size = '128MB'
+    device_size = '128MiB'

     # 2x32 configuration, 1 device with a 32-bit interface
     device_bus_width = 32
@@ -992,7 +992,7 @@
 # ("HBM: Memory Solution for High Performance Processors", MemCon, 2014),
 # IDD measurement values, and by extrapolating data from other classes.
 # Architecture values based on published HBM spec
-# A 4H stack is defined, 2Gb per die for a total of 1GB of memory.
+# A 4H stack is defined, 2Gb per die for a total of 1GiB of memory.
 class HBM_1000_4H_1x128(DRAMInterface):
     # HBM gen1 supports up to 8 128-bit physical channels
     # Configuration defines a single channel, with the capacity
@@ -1006,11 +1006,11 @@
     # HBM supports BL4 and BL2 (legacy mode only)
     burst_length = 4

-    # size of channel in bytes, 4H stack of 2Gb dies is 1GB per stack;
-    # with 8 channels, 128MB per channel
-    device_size = '128MB'
+    # size of channel in bytes, 4H stack of 2Gb dies is 1GiB per stack;
+    # with 8 channels, 128MiB per channel
+    device_size = '128MiB'

-    device_rowbuffer_size = '2kB'
+    device_rowbuffer_size = '2KiB'

     # 1x128 configuration
     devices_per_rank = 1
@@ -1077,7 +1077,7 @@

 # A single HBM x64 interface (one command and address bus), with
 # default timings based on HBM gen1 and data publically released
-# A 4H stack is defined, 8Gb per die for a total of 4GB of memory.
+# A 4H stack is defined, 8Gb per die for a total of 4GiB of memory.
 # Note: This defines a pseudo-channel with a unique controller
 # instantiated per pseudo-channel
 # Stay at same IO rate (1Gbps) to maintain timing relationship with
@@ -1095,13 +1095,13 @@
     # HBM pseudo-channel only supports BL4
     burst_length = 4

-    # size of channel in bytes, 4H stack of 8Gb dies is 4GB per stack;
-    # with 16 channels, 256MB per channel
-    device_size = '256MB'
+    # size of channel in bytes, 4H stack of 8Gb dies is 4GiB per stack;
+    # with 16 channels, 256MiB per channel
+    device_size = '256MiB'

     # page size is halved with pseudo-channel; maintaining the same same number
     # of rows per pseudo-channel with 2X banks across 2 channels
-    device_rowbuffer_size = '1kB'
+    device_rowbuffer_size = '1KiB'

     # HBM has 8 or 16 banks depending on capacity
     # Starting with 4Gb dies, 16 banks are defined
@@ -1146,10 +1146,10 @@
     burst_length = 32

     # size of device in bytes
-    device_size = '1GB'
+    device_size = '1GiB'

-    # 2kB page with BG mode
-    device_rowbuffer_size = '2kB'
+    # 2KiB page with BG mode
+    device_rowbuffer_size = '2KiB'

     # Use a 1x16 configuration
     devices_per_rank = 1
@@ -1279,8 +1279,8 @@
 # Configuring for 8-bank mode, burst of 32
 class LPDDR5_5500_1x16_8B_BL32(LPDDR5_5500_1x16_BG_BL32):

-    # 4kB page with 8B mode
-    device_rowbuffer_size = '4kB'
+    # 4KiB page with 8B mode
+    device_rowbuffer_size = '4KiB'

     # LPDDR5 supports configurable bank options
     # 8B  : BL32, all frequencies
@@ -1384,8 +1384,8 @@
 # Configuring for 8-bank mode, burst of 32
 class LPDDR5_6400_1x16_8B_BL32(LPDDR5_6400_1x16_BG_BL32):

-    # 4kB page with 8B mode
-    device_rowbuffer_size = '4kB'
+    # 4KiB page with 8B mode
+    device_rowbuffer_size = '4KiB'

     # LPDDR5 supports configurable bank options
     # 8B  : BL32, all frequencies
diff --git a/src/mem/NVMInterface.py b/src/mem/NVMInterface.py
index 3f6fbc4..20f51fc 100644
--- a/src/mem/NVMInterface.py
+++ b/src/mem/NVMInterface.py
@@ -76,7 +76,7 @@
     device_rowbuffer_size = '256B'

     # 8X capacity compared to DDR4 x4 DIMM with 8Gb devices
-    device_size = '512GB'
+    device_size = '512GiB'
     # Mimic 64-bit media agnostic DIMM interface
     device_bus_width = 64
     devices_per_rank = 1
diff --git a/src/mem/SimpleMemory.py b/src/mem/SimpleMemory.py
index 6e4b915..e8eac69 100644
--- a/src/mem/SimpleMemory.py
+++ b/src/mem/SimpleMemory.py
@@ -45,7 +45,7 @@
     port = ResponsePort("This port sends responses and receives requests")
     latency = Param.Latency('30ns', "Request to response latency")
     latency_var = Param.Latency('0ns', "Request to response latency variance")
-    # The memory bandwidth limit default is set to 12.8GB/s which is
+    # The memory bandwidth limit default is set to 12.8GiB/s which is
     # representative of a x64 DDR3-1600 channel.
-    bandwidth = Param.MemoryBandwidth('12.8GB/s',
+    bandwidth = Param.MemoryBandwidth('12.8GiB/s',
                                       "Combined read and write bandwidth")
diff --git a/src/mem/XBar.py b/src/mem/XBar.py
index c162584..2dfe7c1 100644
--- a/src/mem/XBar.py
+++ b/src/mem/XBar.py
@@ -138,7 +138,7 @@
     system = Param.System(Parent.any, "System that the crossbar belongs to.")

     # Sanity check on max capacity to track, adjust if needed.
-    max_capacity = Param.MemorySize('8MB', "Maximum capacity of snoop filter")
+    max_capacity = Param.MemorySize('8MiB', "Maximum capacity of snoop filter")

 # We use a coherent crossbar to connect multiple requestors to the L2
 # caches. Normally this crossbar would be part of the cache itself.
diff --git a/src/mem/cache/prefetch/Prefetcher.py b/src/mem/cache/prefetch/Prefetcher.py
index 758803f..0840c60 100644
--- a/src/mem/cache/prefetch/Prefetcher.py
+++ b/src/mem/cache/prefetch/Prefetcher.py
@@ -293,7 +293,7 @@
         "Limit the strides checked up to -X/X, if 0, disable the limit")
     start_degree = Param.Unsigned(4,
         "Initial degree (Maximum number of prefetches generated")
-    hot_zone_size = Param.MemorySize("2kB", "Memory covered by a hot zone")
+ hot_zone_size = Param.MemorySize("2KiB", "Memory covered by a hot zone")
     access_map_table_entries = Param.MemorySize("256",
         "Number of entries in the access map table")
     access_map_table_assoc = Param.Unsigned(8,
@@ -456,7 +456,7 @@
     cxx_class = "Prefetcher::STeMS"
     cxx_header = "mem/cache/prefetch/spatio_temporal_memory_streaming.hh"

-    spatial_region_size = Param.MemorySize("2kB",
+    spatial_region_size = Param.MemorySize("2KiB",
         "Memory covered by a hot zone")
     active_generation_table_entries = Param.MemorySize("64",
         "Number of entries in the active generation table")
diff --git a/src/mem/cache/tags/Tags.py b/src/mem/cache/tags/Tags.py
index ce086fa..1e5b355 100644
--- a/src/mem/cache/tags/Tags.py
+++ b/src/mem/cache/tags/Tags.py
@@ -119,8 +119,8 @@
     cxx_class = 'FALRU'
     cxx_header = "mem/cache/tags/fa_lru.hh"

- min_tracked_cache_size = Param.MemorySize("128kB", "Minimum cache size for"
-                                              " which we track statistics")
+ min_tracked_cache_size = Param.MemorySize("128KiB", "Minimum cache size" + " for which we track statistics")

     # This tag uses its own embedded indexing
     indexing_policy = NULL
diff --git a/src/python/m5/params.py b/src/python/m5/params.py
index 45082d7..2b52b93 100644
--- a/src/python/m5/params.py
+++ b/src/python/m5/params.py
@@ -692,7 +692,7 @@

 class MemorySize(CheckedInt):
     cxx_type = 'uint64_t'
-    ex_str = '512MB'
+    ex_str = '512MiB'
     size = 64
     unsigned = True
     def __init__(self, value):
@@ -704,7 +704,7 @@

 class MemorySize32(CheckedInt):
     cxx_type = 'uint32_t'
-    ex_str = '512MB'
+    ex_str = '512MiB'
     size = 32
     unsigned = True
     def __init__(self, value):
@@ -724,7 +724,7 @@
         else:
             try:
                 # Often addresses are referred to with sizes. Ex: A device
- # base address is at "512MB". Use toMemorySize() to convert + # base address is at "512MiB". Use toMemorySize() to convert # these into addresses. If the address is not specified with a # "size", an exception will occur and numeric translation will
                 # proceed below.
@@ -1748,7 +1748,7 @@

 class MemoryBandwidth(float,ParamValue):
     cxx_type = 'float'
-    ex_str = "1GB/s"
+    ex_str = "1GiB/s"
     cmd_line_settable = True

     def __new__(cls, value):
diff --git a/src/sim/Process.py b/src/sim/Process.py
index bdcb826..767dbfa 100644
--- a/src/sim/Process.py
+++ b/src/sim/Process.py
@@ -44,7 +44,7 @@
     useArchPT = Param.Bool('false', 'maintain an in-memory version of the page\
                             table in an architecture-specific format')
     kvmInSE = Param.Bool('false', 'initialize the process for KvmCPU in SE')
-    maxStackSize = Param.MemorySize('64MB', 'maximum size of the stack')
+    maxStackSize = Param.MemorySize('64MiB', 'maximum size of the stack')

     uid = Param.Int(100, 'user id')
     euid = Param.Int(100, 'effective user id')
diff --git a/src/sim/syscall_emul.hh b/src/sim/syscall_emul.hh
index 79cd35a..16fd175 100644
--- a/src/sim/syscall_emul.hh
+++ b/src/sim/syscall_emul.hh
@@ -1824,7 +1824,7 @@
     const ByteOrder bo = OS::byteOrder;
     switch (resource) {
       case OS::TGT_RLIMIT_STACK:
-        // max stack size in bytes: make up a number (8MB for now)
+        // max stack size in bytes: make up a number (8MiB for now)
         rlp->rlim_cur = rlp->rlim_max = 8 * 1024 * 1024;
         rlp->rlim_cur = htog(rlp->rlim_cur, bo);
         rlp->rlim_max = htog(rlp->rlim_max, bo);
@@ -1867,7 +1867,7 @@
         const ByteOrder bo = OS::byteOrder;
         switch (resource) {
           case OS::TGT_RLIMIT_STACK:
-            // max stack size in bytes: make up a number (8MB for now)
+            // max stack size in bytes: make up a number (8MiB for now)
             rlp->rlim_cur = rlp->rlim_max = 8 * 1024 * 1024;
             rlp->rlim_cur = htog(rlp->rlim_cur, bo);
             rlp->rlim_max = htog(rlp->rlim_max, bo);

--
To view, visit https://gem5-review.googlesource.com/c/public/gem5/+/39475
To unsubscribe, or for help writing mail filters, visit https://gem5-review.googlesource.com/settings

Gerrit-Project: public/gem5
Gerrit-Branch: develop
Gerrit-Change-Id: I0849b97d75e17fca2c782166185f41dd2cf6b0a5
Gerrit-Change-Number: 39475
Gerrit-PatchSet: 1
Gerrit-Owner: Andreas Sandberg <andreas.sandb...@arm.com>
Gerrit-MessageType: newchange