Dear Swapnil,

According to our discussion about applying fast-forwarding to the config file, I
want to let you know that my config file works well without any simulation-controller
flags (such as -I, -F) and can hopefully run without any problem.
1) As you recommended, I added some parts from "Simulation.py" to apply the
aforementioned flags. I am attaching this new config file; please take a look
at it if possible. I ran it for only a few seconds and didn't get any errors.
2) At the end of the code I used this statement: (test_sys.mem_mode = 'timing')

do you think by this way my memory_mode is changed for the rest of my 
simulation?


Thanks a lot for the useful suggestions.

Sincerely,
Ashkan Asgharzadeh



   
# ***** be name khodaye mehrban *****


# @@@@@@@@@@@@@@@@@@@@ Reference @@@@@@@@@@@@@@@@@@@@


# ----------> This file is based on the three websites below,
# and also on the file ((two_level.py)) in the "learning_gem5"
# subdirectory located in the gem5 directory.

# 1) "http://gem5-users.gem5.narkive.com/OACccVAd/modeling-l3-last-level-cache-in-gem5";
# 2) "http://pages.cs.wisc.edu/~david/courses/cs752/Spring2015/gem5-tutorial/part1/cache_config.html";
# 3) "http://www.m5sim.org/SPEC_CPU2006_benchmarks";


# @@@@@@@@@@@@@@@@@@@@ Reference @@@@@@@@@@@@@@@@@@@@

import m5

from m5.objects import *

from m5.util import *

from m5.defines import buildEnv

from m5.simulate import *

from caches import *

import MyBench

from optparse import OptionParser

m5.util.addToPath('../../common')

import Options




# Command-line options: gem5's common options plus per-core workload,
# checkpoint and security-domain settings.

parser = OptionParser()

Options.addCommonOptions(parser)

parser.add_option("--benchmark0", default="", help="The benchmark to be loaded in CPU0")

parser.add_option("--benchmark1", default="", help="The benchmark to be loaded in CPU1")

parser.add_option("--chkpt0", default="", help="The Checkpoint to be loaded in CPU0")

parser.add_option("--chkpt1", default="", help="The Checkpoint to be loaded in CPU1")

# default="" so the later `!= ""` checks correctly skip unset values.
# Without an explicit default, optparse stores None, and None != "" is
# True, which made the SD_ID guards below fire even when no ID was given.
parser.add_option("--SDID_C0", default="", help="Security Domain ID of core(0)")

parser.add_option("--SDID_C1", default="", help="Security Domain ID of core(1)")

(options, args) = parser.parse_args()


# Number of simulated cores.
np = 2

# Build the simulated system: np atomic CPUs, atomic memory mode, and a
# single 4GB physical address range.
test_sys = System(cpu=[AtomicSimpleCPU(cpu_id=i) for i in xrange(np)],
                  mem_mode='atomic',
                  mem_ranges=[AddrRange('4GB')])

# System-level voltage domain and its 2GHz source clock.
test_sys.voltage_domain = VoltageDomain()
test_sys.clk_domain = SrcClockDomain(clock='2GHz',
                                     voltage_domain=test_sys.voltage_domain)

# Separate voltage and clock domains for the CPUs, also at 2GHz.
test_sys.cpu_voltage_domain = VoltageDomain()
test_sys.cpu_clk_domain = SrcClockDomain(clock='2GHz',
                                         voltage_domain=test_sys.cpu_voltage_domain)

# Every core runs in the common CPU clock domain, i.e. at one frequency.
for core in test_sys.cpu:
    core.clk_domain = test_sys.cpu_clk_domain


# Per-core private first level: split L1 instruction/data caches plus
# ITB/DTB page-table-walker caches, each wired to its own CPU's ports.
for core_id in xrange(np):
    core = test_sys.cpu[core_id]

    # Instantiate the caches for this core.
    core.icache = L1ICache(options)
    core.dcache = L1DCache(options)
    core.itb_walker_cache = PageTableWalkerCache()
    core.dtb_walker_cache = PageTableWalkerCache()

    # Wire each cache to the matching CPU-side port.
    core.icache.connectCPU(core.icache_port)
    core.dcache.connectCPU(core.dcache_port)
    core.itb_walker_cache.cpu_side = core.itb.walker.port
    core.dtb_walker_cache.cpu_side = core.dtb.walker.port

# One private L2 crossbar per core; hook each core's L1 caches and
# walker caches up to its own bus.
test_sys.l2bus = [L2XBar() for _ in xrange(np)]

for core_id in xrange(np):
    core = test_sys.cpu[core_id]
    bus = test_sys.l2bus[core_id]

    core.icache.connectBus(bus)
    core.dcache.connectBus(bus)
    core.itb_walker_cache.mem_side = bus.slave
    core.dtb_walker_cache.mem_side = bus.slave


# One private, unified L2 cache per core, fed by that core's L2 bus.
test_sys.l2cache = [L2Cache(options) for _ in xrange(np)]

for core_id in xrange(np):
    test_sys.l2cache[core_id].connectCPUSideBus(test_sys.l2bus[core_id])


# L3 level.  NOTE(review): the original comment calls this a "shared"
# L3, but one L3 cache and one L3 crossbar are created per core, so each
# core actually gets a private L3 slice -- confirm this is the intended
# topology.
test_sys.l3cache = [L3Cache(options) for _ in xrange(np)]
test_sys.l3bus = [L3XBar() for _ in xrange(np)]

# CPU side: each L3 cache listens on its own L3 bus ...
for core_id in xrange(np):
    test_sys.l3cache[core_id].connectCPUSideBusL3(test_sys.l3bus[core_id])

# ... and each L2 cache's memory side feeds that same bus.
for core_id in xrange(np):
    test_sys.l2cache[core_id].connectMemSideBus(test_sys.l3bus[core_id])



# System-wide memory crossbar; every L3 cache's memory side ends here.
test_sys.membus = SystemXBar()

for core_id in xrange(np):
    test_sys.l3cache[core_id].connectMemSideBusL3(test_sys.membus)


# Per-core interrupt controllers.  These sit directly on the memory bus
# and are not cached.  On X86 the controller's master/slave ports would
# also need explicit connections to the memory bus; on ARM creating the
# controller is sufficient (no port wiring required).
for core_id in xrange(np):
    test_sys.cpu[core_id].createInterruptController()

# Functional port used by the simulator itself (e.g. to load binaries).
test_sys.system_port = test_sys.membus.slave


# DDR3-1600 x64 memory controller attached to the memory bus.
test_sys.mem_ctrl = DDR3_1600_x64()

# Cover the declared physical address range.  The original file left
# this assignment commented out (and it referred to a non-existent
# `system` object); without it the controller keeps its default range,
# which does not necessarily match the 4GB range in mem_ranges.
test_sys.mem_ctrl.range = test_sys.mem_ranges[0]

test_sys.mem_ctrl.port = test_sys.membus.master


# SPEC2006 workloads exposed by MyBench; each option value doubles as
# the MyBench attribute name, so a lookup replaces the two original
# ten-branch if/elif chains.
_BENCHMARKS = ('bzip2', 'gcc', 'mcf', 'gobmk', 'hmmer', 'sjeng',
               'libquantum', 'h264ref', 'astar', 'xalancbmk')

# Start every core with an empty LiveProcess; it is replaced below when
# a recognised benchmark name is given on the command line.  Unknown
# names leave the default untouched, exactly as the if/elif chains did.
process = [LiveProcess() for _ in xrange(np)]

# Workload for CPU0.
if options.benchmark0 in _BENCHMARKS:
    process[0] = getattr(MyBench, options.benchmark0)

# Workload for CPU1, selected the same way.
if options.benchmark1 in _BENCHMARKS:
    process[1] = getattr(MyBench, options.benchmark1)


# Optionally attach a checkpoint path to each core's workload.
# NOTE(review): `chkpt` is assumed to be a valid process parameter in
# this gem5 tree -- verify, since mainline LiveProcess does not define
# it.
for core_id, chkpt in enumerate((options.chkpt0, options.chkpt1)):
    if chkpt != "":
        process[core_id].chkpt = chkpt


# Attach each workload to its core and spawn the hardware thread
# contexts.
for core, proc in zip(test_sys.cpu, process):
    core.workload = proc
    core.createThreads()

# Tag each core with its Security Domain ID when one was supplied.
# Use truthiness rather than `!= ""`: if the --SDID_* options are parsed
# with no default, an unset value is None, and `None != ""` is True --
# the original code therefore assigned SD_ID = None whenever the flag
# was omitted.  Truthiness skips both None and the empty string.
if options.SDID_C0:
    test_sys.cpu[0].SD_ID = options.SDID_C0

if options.SDID_C1:
    test_sys.cpu[1].SD_ID = options.SDID_C1

# Simulation root (syscall-emulation mode, not full-system).
root = Root(full_system=False, system=test_sys)

# Detailed out-of-order CPUs to switch to after fast-forwarding.  They
# start switched out and inherit workload and clocking from the atomic
# cores they will replace.
switch_cpus = [DerivO3CPU(switched_out=True, cpu_id=n) for n in xrange(np)]

for old_cpu, new_cpu in zip(test_sys.cpu, switch_cpus):
    # Fast-forward bound: stop the atomic phase after this many insts.
    if options.fast_forward:
        old_cpu.max_insts_any_thread = int(options.fast_forward)

    new_cpu.system = test_sys
    new_cpu.workload = old_cpu.workload
    new_cpu.clk_domain = old_cpu.clk_domain

    # Bound the detailed simulation phase, if requested.
    if options.maxinsts:
        new_cpu.max_insts_any_thread = options.maxinsts

test_sys.switch_cpus = switch_cpus

# (old, new) pairs consumed by m5.switchCpus() below.
switch_cpu_list = zip(test_sys.cpu, switch_cpus)


# Instantiate all of the objects we've created above.

m5.instantiate()

# Run until the maximum tick unless an exit event stops us earlier.
maxtick = m5.MaxTick

# Fast-forward phase: run on the atomic CPUs until max_insts_any_thread
# (set from --fast-forward above) ends the simulation.
if options.fast_forward:

  print "\n\n Switch at instruction count:%s" %str(test_sys.cpu[0].max_insts_any_thread)
  
  exit_event = m5.simulate()

# NOTE(review): this message prints unconditionally (even without
# --fast-forward) and *before* the switch below has actually happened.
print "\n\n Switched CPUs @ tick %s" %(m5.curTick())  

# Hand execution over from the atomic CPUs to the detailed O3 CPUs.
m5.switchCpus(test_sys,switch_cpu_list)

# NOTE(review): assigning mem_mode after m5.instantiate() most likely
# has no effect on the already-instantiated simulation; m5.switchCpus()
# is expected to handle the atomic->timing transition itself -- verify
# against gem5's Simulation.py.
test_sys.mem_mode = 'timing'

# Drop statistics gathered during the fast-forward phase so the
# detailed run starts from clean counters.
if options.fast_forward:
  
  m5.stats.reset()
  
  
print "\n\n *** Real Simulation ***"

# Detailed simulation on the O3 CPUs for the remaining ticks.
exit_event = m5.simulate(maxtick-m5.curTick()) 
  
print '\n\n Exiting @ tick %i because %s' %(m5.curTick(),exit_event.getCause())







_______________________________________________
gem5-users mailing list
[email protected]
http://m5sim.org/cgi-bin/mailman/listinfo/gem5-users

Reply via email to