Re: [RFC PATCH v2 1/3] plugins: Added a new cache modelling plugin

2021-06-01 Thread Mahmoud Mandour
On Sun, May 30, 2021 at 8:37 AM Mahmoud Mandour 
wrote:

> Added a cache modelling plugin that uses a static configuration found in
> many commercial microprocessors, together with a random eviction policy.
>
> Signed-off-by: Mahmoud Mandour 
> ---
>  contrib/plugins/Makefile |   1 +
>  contrib/plugins/cache.c  | 398 +++
>  2 files changed, 399 insertions(+)
>  create mode 100644 contrib/plugins/cache.c
>
> diff --git a/contrib/plugins/Makefile b/contrib/plugins/Makefile
> index b9d7935e5e..2237b47f8b 100644
> --- a/contrib/plugins/Makefile
> +++ b/contrib/plugins/Makefile
> @@ -18,6 +18,7 @@ NAMES += hotpages
>  NAMES += howvec
>  NAMES += lockstep
>  NAMES += hwprofile
> +NAMES += cache
>
>  SONAMES := $(addsuffix .so,$(addprefix lib,$(NAMES)))
>
> diff --git a/contrib/plugins/cache.c b/contrib/plugins/cache.c
> new file mode 100644
> index 0000000000..8c9d1dd538
> --- /dev/null
> +++ b/contrib/plugins/cache.c
> @@ -0,0 +1,398 @@
> +/*
> + * Copyright (C) 2021, Mahmoud Mandour 
> + *
> + * License: GNU GPL, version 2 or later.
> + *   See the COPYING file in the top-level directory.
> + */
> +
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +
> +#include 
> +
> +QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
> +
> +static GRand *rng;
> +static GHashTable *dmiss_ht;
> +static GHashTable *imiss_ht;
> +
> +static GMutex dmtx, imtx;
> +
> +static int limit;
> +static bool sys;
> +
> +static uint64_t dmem_accesses;
> +static uint64_t dmisses;
> +
> +static uint64_t imem_accesses;
> +static uint64_t imisses;
> +
> +static enum qemu_plugin_mem_rw rw = QEMU_PLUGIN_MEM_RW;
> +
> +enum AccessResult {
> +HIT = 0,
> +MISS = 1
> +};
> +
> +struct InsnData {
> +char *disas_str;
> +uint64_t addr;
> +uint64_t misses;
> +};
> +
> +struct CacheBlock {
> +uint64_t tag;
> +bool valid;
> +};
> +
> +struct CacheSet {
> +struct CacheBlock *blocks;
> +};
> +
> +struct Cache {
> +struct CacheSet *sets;
> +int num_sets;
> +
> +int cachesize;
> +int blksize;
> +int assoc;
> +
> +uint64_t blk_mask;
> +uint64_t set_mask;
> +uint64_t tag_mask;
> +};
> +
> +struct Cache *dcache, *icache;
> +
> +static int pow_of_two(int num)
> +{
> +g_assert((num & (num - 1)) == 0);
> +int ret = 0;
> +while (num /= 2) {
> +ret++;
> +}
> +return ret;
> +}
> +
> +static inline uint64_t extract_tag(struct Cache *cache, uint64_t addr)
> +{
> +return (addr & cache->tag_mask) >>
> +(pow_of_two(cache->num_sets) + pow_of_two(cache->blksize));
> +}
> +
> +static inline uint64_t extract_set(struct Cache *cache, uint64_t addr)
> +{
> +return (addr & cache->set_mask) >> (pow_of_two(cache->blksize));
> +}
> +
> +static struct Cache *cache_init(int blksize, int assoc, int cachesize)
> +{
> +struct Cache *cache;
> +int i;
> +
> +cache = g_new(struct Cache, 1);
> +cache->blksize = blksize;
> +cache->assoc = assoc;
> +cache->cachesize = cachesize;
> +cache->num_sets = cachesize / (blksize * assoc);
> +cache->sets = g_new(struct CacheSet, cache->num_sets);
> +
> +for (i = 0; i < cache->num_sets; i++) {
> +cache->sets[i].blocks = g_new0(struct CacheBlock, assoc);
> +}
> +
> +cache->blk_mask = blksize - 1;
> +cache->set_mask = ((cache->num_sets - 1) <<
> (pow_of_two(cache->blksize)));
> +cache->tag_mask = ~(cache->set_mask | cache->blk_mask);
> +
> +return cache;
> +}
> +
> +static int get_invalid_block(struct Cache *cache, uint64_t set)
> +{
> +int i;
> +
> +for (i = 0; i < cache->assoc; i++) {
> +if (!cache->sets[set].blocks[i].valid) {
> +/* compulsory (cold) miss: an unfilled block is available */
> +return i;
> +}
> +}
> +
> +/* conflict/capacity miss: all blocks valid, eviction required */
> +return -1;
> +}
> +
> +static int get_replaced_block(struct Cache *cache)
> +{
> +return g_rand_int_range(rng, 0, cache->assoc);
> +}
> +
> +static bool in_cache(struct Cache *cache, uint64_t addr)
> +{
> +int i;
> +uint64_t tag, set;
> +
> +tag = extract_tag(cache, addr);
> +set = extract_set(cache, addr);
> +
> +for (i = 0; i < cache->assoc; i++) {
> +if (cache->sets[set].blocks[i].tag == tag &&
> +cache->sets[set].blocks[i].valid) {
> +return true;
> +}
> +}
> +
> +return false;
> +}
> +
> +static enum AccessResult access_cache(struct Cache *cache, uint64_t addr)
> +{
> +uint64_t tag, set;
> +int replaced_blk;
> +
> +if (in_cache(cache, addr)) {
> +return HIT;
> +}
> +
> +tag = extract_tag(cache, addr);
> +set = extract_set(cache, addr);
> +
> +replaced_blk = get_invalid_block(cache, set);
> +
> +if (replaced_blk == -1) {
> +replaced_blk = get_replaced_block(cache);
> +}
> +
> +cache->sets[set].blocks[replaced_blk].tag = tag;
> +cache->sets[set].blocks[replaced_blk].valid = true;
> +
> 

[RFC PATCH v2 1/3] plugins: Added a new cache modelling plugin

2021-05-30 Thread Mahmoud Mandour
Added a cache modelling plugin that uses a static configuration found in
many commercial microprocessors, together with a random eviction policy.

Signed-off-by: Mahmoud Mandour 
---
 contrib/plugins/Makefile |   1 +
 contrib/plugins/cache.c  | 398 +++
 2 files changed, 399 insertions(+)
 create mode 100644 contrib/plugins/cache.c

diff --git a/contrib/plugins/Makefile b/contrib/plugins/Makefile
index b9d7935e5e..2237b47f8b 100644
--- a/contrib/plugins/Makefile
+++ b/contrib/plugins/Makefile
@@ -18,6 +18,7 @@ NAMES += hotpages
 NAMES += howvec
 NAMES += lockstep
 NAMES += hwprofile
+NAMES += cache
 
 SONAMES := $(addsuffix .so,$(addprefix lib,$(NAMES)))
 
diff --git a/contrib/plugins/cache.c b/contrib/plugins/cache.c
new file mode 100644
index 0000000000..8c9d1dd538
--- /dev/null
+++ b/contrib/plugins/cache.c
@@ -0,0 +1,398 @@
+/*
+ * Copyright (C) 2021, Mahmoud Mandour 
+ *
+ * License: GNU GPL, version 2 or later.
+ *   See the COPYING file in the top-level directory.
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+
+QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
+
+static GRand *rng;
+static GHashTable *dmiss_ht;
+static GHashTable *imiss_ht;
+
+static GMutex dmtx, imtx;
+
+static int limit;
+static bool sys;
+
+static uint64_t dmem_accesses;
+static uint64_t dmisses;
+
+static uint64_t imem_accesses;
+static uint64_t imisses;
+
+static enum qemu_plugin_mem_rw rw = QEMU_PLUGIN_MEM_RW;
+
+enum AccessResult {
+HIT = 0,
+MISS = 1
+};
+
+struct InsnData {
+char *disas_str;
+uint64_t addr;
+uint64_t misses;
+};
+
+struct CacheBlock {
+uint64_t tag;
+bool valid;
+};
+
+struct CacheSet {
+struct CacheBlock *blocks;
+};
+
+struct Cache {
+struct CacheSet *sets;
+int num_sets;
+
+int cachesize;
+int blksize;
+int assoc;
+
+uint64_t blk_mask;
+uint64_t set_mask;
+uint64_t tag_mask;
+};
+
+struct Cache *dcache, *icache;
+
+static int pow_of_two(int num)
+{
+g_assert((num & (num - 1)) == 0);
+int ret = 0;
+while (num /= 2) {
+ret++;
+}
+return ret;
+}
+
+static inline uint64_t extract_tag(struct Cache *cache, uint64_t addr)
+{
+return (addr & cache->tag_mask) >>
+(pow_of_two(cache->num_sets) + pow_of_two(cache->blksize));
+}
+
+static inline uint64_t extract_set(struct Cache *cache, uint64_t addr)
+{
+return (addr & cache->set_mask) >> (pow_of_two(cache->blksize));
+}
+
+static struct Cache *cache_init(int blksize, int assoc, int cachesize)
+{
+struct Cache *cache;
+int i;
+
+cache = g_new(struct Cache, 1);
+cache->blksize = blksize;
+cache->assoc = assoc;
+cache->cachesize = cachesize;
+cache->num_sets = cachesize / (blksize * assoc);
+cache->sets = g_new(struct CacheSet, cache->num_sets);
+
+for (i = 0; i < cache->num_sets; i++) {
+cache->sets[i].blocks = g_new0(struct CacheBlock, assoc);
+}
+
+cache->blk_mask = blksize - 1;
+cache->set_mask = ((cache->num_sets - 1) << (pow_of_two(cache->blksize)));
+cache->tag_mask = ~(cache->set_mask | cache->blk_mask);
+
+return cache;
+}
+
+static int get_invalid_block(struct Cache *cache, uint64_t set)
+{
+int i;
+
+for (i = 0; i < cache->assoc; i++) {
+if (!cache->sets[set].blocks[i].valid) {
+/* compulsory (cold) miss: an unfilled block is available */
+return i;
+}
+}
+
+/* conflict/capacity miss: all blocks valid, eviction required */
+return -1;
+}
+
+static int get_replaced_block(struct Cache *cache)
+{
+return g_rand_int_range(rng, 0, cache->assoc);
+}
+
+static bool in_cache(struct Cache *cache, uint64_t addr)
+{
+int i;
+uint64_t tag, set;
+
+tag = extract_tag(cache, addr);
+set = extract_set(cache, addr);
+
+for (i = 0; i < cache->assoc; i++) {
+if (cache->sets[set].blocks[i].tag == tag &&
+cache->sets[set].blocks[i].valid) {
+return true;
+}
+}
+
+return false;
+}
+
+static enum AccessResult access_cache(struct Cache *cache, uint64_t addr)
+{
+uint64_t tag, set;
+int replaced_blk;
+
+if (in_cache(cache, addr)) {
+return HIT;
+}
+
+tag = extract_tag(cache, addr);
+set = extract_set(cache, addr);
+
+replaced_blk = get_invalid_block(cache, set);
+
+if (replaced_blk == -1) {
+replaced_blk = get_replaced_block(cache);
+}
+
+cache->sets[set].blocks[replaced_blk].tag = tag;
+cache->sets[set].blocks[replaced_blk].valid = true;
+
+return MISS;
+}
+
+struct InsnData *get_or_create(GHashTable *ht, struct InsnData *insn_data,
+   uint64_t addr)
+{
+struct InsnData *insn = g_hash_table_lookup(ht, GUINT_TO_POINTER(addr));
+if (!insn) {
+g_hash_table_insert(ht, GUINT_TO_POINTER(addr), (gpointer) insn_data);
+insn = insn_data;
+}
+
+return insn;
+}
+
+static void vcpu_mem_access(unsigned int cpu_index, qemu_plugin_meminfo_t