Matt Sinclair has submitted this change. ( https://gem5-review.googlesource.com/c/public/gem5/+/50967 )

Change subject: configs, gpu-compute: update GPU scripts to remove master/slave
......................................................................

configs, gpu-compute: update GPU scripts to remove master/slave

Update apu_se.py and the underlying configuration files used for GPU
runs to replace the deprecated master/slave port terminology with the
current request/response port names.
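
For reference, a minimal sketch of the renamed ports as these scripts
now use them (illustrative only, not part of the patch):

    # Coalescer/TLB ports: master -> mem_side_ports,
    #                      slave  -> cpu_side_ports.
    system.l1_coalescer[0].mem_side_ports[0] = \
        system.l1_tlb[0].cpu_side_ports[0]
    # Ruby sequencer ports: slave -> in_ports.
    system.cpu[0].icache_port = system.ruby._cpu_ports[0].in_ports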

Change-Id: Icf309782f0899dc412eccd27e3ac017902316a70
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/50967
Tested-by: kokoro <noreply+kok...@google.com>
Reviewed-by: Matthew Poremba <matthew.pore...@amd.com>
Reviewed-by: Jason Lowe-Power <power...@gmail.com>
Reviewed-by: Bobby R. Bruce <bbr...@ucdavis.edu>
Maintainer: Jason Lowe-Power <power...@gmail.com>
Maintainer: Bobby R. Bruce <bbr...@ucdavis.edu>
---
M configs/common/GPUTLBConfig.py
M configs/example/apu_se.py
2 files changed, 53 insertions(+), 28 deletions(-)

Approvals:
  Jason Lowe-Power: Looks good to me, but someone else must approve; Looks good to me, approved
  Matthew Poremba: Looks good to me, approved
  Bobby R. Bruce: Looks good to me, approved; Looks good to me, approved
  kokoro: Regressions pass

diff --git a/configs/common/GPUTLBConfig.py b/configs/common/GPUTLBConfig.py
index 958cf1f..d7adaee 100644
--- a/configs/common/GPUTLBConfig.py
+++ b/configs/common/GPUTLBConfig.py
@@ -148,8 +148,8 @@
         for TLB_type in hierarchy_level:
             name = TLB_type['name']
             for index in range(TLB_type['width']):
-                exec('system.%s_coalescer[%d].master[0] = \
-                        system.%s_tlb[%d].slave[0]' % \
+                exec('system.%s_coalescer[%d].mem_side_ports[0] = \
+                        system.%s_tlb[%d].cpu_side_ports[0]' % \
                         (name, index, name, index))

     # Connect the cpuSidePort (slave) of all the coalescers in level 1
@@ -163,12 +163,12 @@
                 if tlb_per_cu:
                     for tlb in range(tlb_per_cu):
                         exec('system.cpu[%d].CUs[%d].translation_port[%d] = \
-                                system.l1_coalescer[%d].slave[%d]' % \
+                                system.l1_coalescer[%d].cpu_side_ports[%d]' % \
                                 (shader_idx, cu_idx, tlb,
                                     cu_idx*tlb_per_cu+tlb, 0))
                 else:
                     exec('system.cpu[%d].CUs[%d].translation_port[%d] = \
-                            system.l1_coalescer[%d].slave[%d]' % \
+                            system.l1_coalescer[%d].cpu_side_ports[%d]' % \
                             (shader_idx, cu_idx, tlb_per_cu,
                                 cu_idx / (n_cu / num_TLBs),
                                 cu_idx % (n_cu / num_TLBs)))
@@ -177,14 +177,14 @@
                 sqc_tlb_index = index / options.cu_per_sqc
                 sqc_tlb_port_id = index % options.cu_per_sqc
                 exec('system.cpu[%d].CUs[%d].sqc_tlb_port = \
-                        system.sqc_coalescer[%d].slave[%d]' % \
+                        system.sqc_coalescer[%d].cpu_side_ports[%d]' % \
                         (shader_idx, index, sqc_tlb_index, sqc_tlb_port_id))
         elif name == 'scalar': # Scalar D-TLB
             for index in range(n_cu):
                 scalar_tlb_index = index / options.cu_per_scalar_cache
                 scalar_tlb_port_id = index % options.cu_per_scalar_cache
                 exec('system.cpu[%d].CUs[%d].scalar_tlb_port = \
-                        system.scalar_coalescer[%d].slave[%d]' % \
+                        system.scalar_coalescer[%d].cpu_side_ports[%d]' % \
                         (shader_idx, index, scalar_tlb_index,
                          scalar_tlb_port_id))

@@ -196,11 +196,12 @@
     for TLB_type in L1:
         name = TLB_type['name']
         for index in range(TLB_type['width']):
-            exec('system.%s_tlb[%d].master[0] = \
-                    system.l2_coalescer[0].slave[%d]' % \
+            exec('system.%s_tlb[%d].mem_side_ports[0] = \
+                    system.l2_coalescer[0].cpu_side_ports[%d]' % \
                     (name, index, l2_coalescer_index))
             l2_coalescer_index += 1
     # L2 <-> L3
-    system.l2_tlb[0].master[0] = system.l3_coalescer[0].slave[0]
+    system.l2_tlb[0].mem_side_ports[0] = \
+        system.l3_coalescer[0].cpu_side_ports[0]

     return system
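
Note: the exec() calls above synthesize attribute names from each TLB
type's name string. A hedged equivalent without exec, assuming the same
system attributes these scripts create (name and index as in the loops):

    # Illustrative sketch only -- same effect as the exec() pattern above.
    coalescer = getattr(system, '%s_coalescer' % name)[index]
    tlb = getattr(system, '%s_tlb' % name)[index]
    coalescer.mem_side_ports[0] = tlb.cpu_side_ports[0]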
diff --git a/configs/example/apu_se.py b/configs/example/apu_se.py
index 7a45952..29ceddb 100644
--- a/configs/example/apu_se.py
+++ b/configs/example/apu_se.py
@@ -342,8 +342,9 @@
         compute_units[-1].prefetch_prev_type = args.pf_type

     # attach the LDS and the CU to the bus (actually a Bridge)
-    compute_units[-1].ldsPort = compute_units[-1].ldsBus.slave
-    compute_units[-1].ldsBus.master = compute_units[-1].localDataStore.cuPort
+    compute_units[-1].ldsPort = compute_units[-1].ldsBus.cpu_side_port
+    compute_units[-1].ldsBus.mem_side_port = \
+        compute_units[-1].localDataStore.cuPort

 # Attach compute units to GPU
 shader.CUs = compute_units
@@ -561,8 +562,8 @@
 Ruby.create_system(args, None, system, None, dma_list, None)
 system.ruby.clk_domain = SrcClockDomain(clock = args.ruby_clock,
                                     voltage_domain = system.voltage_domain)
-gpu_cmd_proc.pio = system.piobus.master
-gpu_hsapp.pio = system.piobus.master
+gpu_cmd_proc.pio = system.piobus.mem_side_ports
+gpu_hsapp.pio = system.piobus.mem_side_ports

 for i, dma_device in enumerate(dma_list):
     exec('system.dma_cntrl%d.clk_domain = system.ruby.clk_domain' % i)
@@ -575,17 +576,19 @@
     system.cpu[i].createInterruptController()

     # Connect cache port's to ruby
-    system.cpu[i].icache_port = ruby_port.slave
-    system.cpu[i].dcache_port = ruby_port.slave
+    system.cpu[i].icache_port = ruby_port.in_ports
+    system.cpu[i].dcache_port = ruby_port.in_ports

-    ruby_port.mem_master_port = system.piobus.slave
+    ruby_port.mem_request_port = system.piobus.cpu_side_ports
     if buildEnv['TARGET_ISA'] == "x86":
-        system.cpu[i].interrupts[0].pio = system.piobus.master
-        system.cpu[i].interrupts[0].int_master = system.piobus.slave
-        system.cpu[i].interrupts[0].int_slave = system.piobus.master
+        system.cpu[i].interrupts[0].pio = system.piobus.mem_side_ports
+        system.cpu[i].interrupts[0].int_requestor = \
+            system.piobus.cpu_side_ports
+        system.cpu[i].interrupts[0].int_responder = \
+            system.piobus.mem_side_ports
         if fast_forward:
             system.cpu[i].mmu.connectWalkerPorts(
-                ruby_port.slave, ruby_port.slave)
+                ruby_port.in_ports, ruby_port.in_ports)

 # attach CU ports to Ruby
 # Because of the peculiarities of the CP core, you may have 1 CPU but 2
@@ -615,7 +618,7 @@
     # in one GPU issue cycle. Hence wavefront_size mem ports.
     for j in range(wavefront_size):
         system.cpu[shader_idx].CUs[i].memory_port[j] = \
-                  system.ruby._cpu_ports[gpu_port_idx].slave[j]
+                  system.ruby._cpu_ports[gpu_port_idx].in_ports[j]
     gpu_port_idx += 1

 for i in range(n_cu):
@@ -623,7 +626,7 @@
         print("incrementing idx on ", i)
         gpu_port_idx += 1
     system.cpu[shader_idx].CUs[i].sqc_port = \
-            system.ruby._cpu_ports[gpu_port_idx].slave
+            system.ruby._cpu_ports[gpu_port_idx].in_ports
 gpu_port_idx = gpu_port_idx + 1

 for i in range(n_cu):
@@ -631,19 +634,21 @@
         print("incrementing idx on ", i)
         gpu_port_idx += 1
     system.cpu[shader_idx].CUs[i].scalar_port = \
-        system.ruby._cpu_ports[gpu_port_idx].slave
+        system.ruby._cpu_ports[gpu_port_idx].in_ports
 gpu_port_idx = gpu_port_idx + 1

 # attach CP ports to Ruby
 for i in range(args.num_cp):
     system.cpu[cp_idx].createInterruptController()
     system.cpu[cp_idx].dcache_port = \
-                system.ruby._cpu_ports[gpu_port_idx + i * 2].slave
+                system.ruby._cpu_ports[gpu_port_idx + i * 2].in_ports
     system.cpu[cp_idx].icache_port = \
-                system.ruby._cpu_ports[gpu_port_idx + i * 2 + 1].slave
-    system.cpu[cp_idx].interrupts[0].pio = system.piobus.master
-    system.cpu[cp_idx].interrupts[0].int_master = system.piobus.slave
-    system.cpu[cp_idx].interrupts[0].int_slave = system.piobus.master
+                system.ruby._cpu_ports[gpu_port_idx + i * 2 + 1].in_ports
+    system.cpu[cp_idx].interrupts[0].pio = system.piobus.mem_side_ports
+    system.cpu[cp_idx].interrupts[0].int_requestor = \
+        system.piobus.cpu_side_ports
+    system.cpu[cp_idx].interrupts[0].int_responder = \
+        system.piobus.mem_side_ports
     cp_idx = cp_idx + 1

################# Connect the CPU and GPU via GPU Dispatcher ##################

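As a usage note, the interrupt-controller wiring above follows the
piobus's new naming: the bus issues requests out of its mem_side_ports
(to the pio and int_responder ports), while the controller's own
requests enter the bus through cpu_side_ports. A condensed sketch of
the pattern this patch installs (cpu stands in for system.cpu[i]):

    # Bus -> device traffic leaves via the bus's mem_side_ports;
    # device -> bus requests arrive on its cpu_side_ports.
    cpu.interrupts[0].pio = system.piobus.mem_side_ports
    cpu.interrupts[0].int_requestor = system.piobus.cpu_side_ports
    cpu.interrupts[0].int_responder = system.piobus.mem_side_ports
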
--
To view, visit https://gem5-review.googlesource.com/c/public/gem5/+/50967
To unsubscribe, or for help writing mail filters, visit https://gem5-review.googlesource.com/settings

Gerrit-Project: public/gem5
Gerrit-Branch: develop
Gerrit-Change-Id: Icf309782f0899dc412eccd27e3ac017902316a70
Gerrit-Change-Number: 50967
Gerrit-PatchSet: 3
Gerrit-Owner: Matt Sinclair <mattdsincl...@gmail.com>
Gerrit-Reviewer: Alex Dutu <alexandru.d...@amd.com>
Gerrit-Reviewer: Bobby R. Bruce <bbr...@ucdavis.edu>
Gerrit-Reviewer: Jason Lowe-Power <ja...@lowepower.com>
Gerrit-Reviewer: Jason Lowe-Power <power...@gmail.com>
Gerrit-Reviewer: Matt Sinclair <mattdsincl...@gmail.com>
Gerrit-Reviewer: Matthew Poremba <matthew.pore...@amd.com>
Gerrit-Reviewer: kokoro <noreply+kok...@google.com>
Gerrit-MessageType: merged