The flow detection in kio was a stub implementation that was never functional, and its userspace counterpart has recently been deprecated, so remove all the related dead code.
Related to: #VSTOR-117505 Signed-off-by: Liu Kui <[email protected]> --- fs/fuse/kio/pcs/pcs_client_types.h | 3 - fs/fuse/kio/pcs/pcs_cluster.c | 9 --- fs/fuse/kio/pcs/pcs_cluster_core.c | 2 - fs/fuse/kio/pcs/pcs_cs.c | 6 -- fs/fuse/kio/pcs/pcs_cs.h | 2 - fs/fuse/kio/pcs/pcs_cs_accel.c | 2 - fs/fuse/kio/pcs/pcs_flow_detect.h | 14 ----- fs/fuse/kio/pcs/pcs_flow_detect_stub.h | 83 -------------------------- fs/fuse/kio/pcs/pcs_map.c | 78 ++++-------------------- fs/fuse/kio/pcs/pcs_map.h | 4 -- fs/fuse/kio/pcs/pcs_req.h | 2 - 11 files changed, 11 insertions(+), 194 deletions(-) delete mode 100644 fs/fuse/kio/pcs/pcs_flow_detect.h delete mode 100644 fs/fuse/kio/pcs/pcs_flow_detect_stub.h diff --git a/fs/fuse/kio/pcs/pcs_client_types.h b/fs/fuse/kio/pcs/pcs_client_types.h index 0b5f8587441d..a919e4e931e1 100644 --- a/fs/fuse/kio/pcs/pcs_client_types.h +++ b/fs/fuse/kio/pcs/pcs_client_types.h @@ -10,7 +10,6 @@ #include "pcs_prot_types.h" #include "pcs_mds_prot.h" -#include "pcs_flow_detect.h" #include "fuse_stat.h" /* Values of lease. It is value, not bitmask. 
*/ @@ -42,7 +41,6 @@ struct pcs_map_set { struct shrinker shrinker; /* TODO: temproraly disabled */ - struct pcs_flow_table_global ftab; }; struct pcs_mapping { @@ -51,7 +49,6 @@ struct pcs_mapping { unsigned long nrmaps; struct radix_tree_root map_tree; /* GFP_ATOMIC */ spinlock_t map_lock; - struct pcs_flow_table ftab; }; diff --git a/fs/fuse/kio/pcs/pcs_cluster.c b/fs/fuse/kio/pcs/pcs_cluster.c index 7aaf88b8bd26..b17f973da0bb 100644 --- a/fs/fuse/kio/pcs/pcs_cluster.c +++ b/fs/fuse/kio/pcs/pcs_cluster.c @@ -37,7 +37,6 @@ static inline int is_file_inline(struct pcs_dentry_info *di) void pcs_sreq_complete(struct pcs_int_request *sreq) { struct pcs_int_request *ireq = sreq->completion_data.parent; - struct pcs_cluster_core *cluster = sreq->cc; if (pcs_if_error(&sreq->error)) { if (!pcs_if_error(&ireq->error)) { @@ -74,8 +73,6 @@ void pcs_sreq_complete(struct pcs_int_request *sreq) if (!pcs_sreq_detach(sreq)) ireq_complete(ireq); - if (sreq->type == PCS_IREQ_IOCHUNK && sreq->iochunk.flow) - pcs_flow_put(sreq->iochunk.flow, &cluster->maps.ftab); ireq_destroy(sreq); } @@ -229,7 +226,6 @@ static void fiemap_process_one(struct fiemap_iterator *fiter) sreq->tok_reserved = 0; sreq->tok_serno = 0; sreq->iochunk.map = NULL; - sreq->iochunk.flow = pcs_flow_record(&di->mapping.ftab, 0, pos, end-pos, &di->cluster->maps.ftab); sreq->iochunk.cmd = PCS_REQ_T_FIEMAP; sreq->iochunk.cs_index = 0; sreq->iochunk.chunk = round_down(pos, DENTRY_CHUNK_SIZE(di)); @@ -345,7 +341,6 @@ static noinline void __pcs_cc_process_ireq_rw(struct pcs_int_request *ireq) u64 pos = ireq->apireq.req->pos; u64 sz = ireq->apireq.req->size; u64 dio_offset = 0; - struct pcs_flow_node *fl; if (di->fileinfo.sys.map_type != PCS_MAP_PLAIN) { BUG_ON(1); @@ -359,8 +354,6 @@ static noinline void __pcs_cc_process_ireq_rw(struct pcs_int_request *ireq) atomic_set(&ireq->iocount, 1); ireq->flags |= IREQ_F_CACHED; - fl = pcs_flow_record(&di->mapping.ftab, ireq->apireq.req->type == PCS_REQ_T_WRITE, - pos, sz, 
&di->cluster->maps.ftab); while (sz) { struct pcs_int_request *sreq; @@ -384,7 +377,6 @@ static noinline void __pcs_cc_process_ireq_rw(struct pcs_int_request *ireq) sreq->tok_reserved = 0; sreq->tok_serno = 0; sreq->iochunk.map = NULL; - sreq->iochunk.flow = pcs_flow_get(fl); sreq->iochunk.cmd = ireq->apireq.req->type; sreq->iochunk.cs_index = 0; sreq->iochunk.chunk = chunk; @@ -409,7 +401,6 @@ static noinline void __pcs_cc_process_ireq_rw(struct pcs_int_request *ireq) sz -= len; dio_offset += len; } - pcs_flow_put(fl, &di->cluster->maps.ftab); if (atomic_dec_and_test(&ireq->iocount)) ireq_complete(ireq); } diff --git a/fs/fuse/kio/pcs/pcs_cluster_core.c b/fs/fuse/kio/pcs/pcs_cluster_core.c index 94f020b78c4c..05294f35eb78 100644 --- a/fs/fuse/kio/pcs/pcs_cluster_core.c +++ b/fs/fuse/kio/pcs/pcs_cluster_core.c @@ -56,7 +56,6 @@ static int pcs_mapset_init(struct pcs_cluster_core *cc) atomic_set(&maps->count, 0); atomic_set(&maps->dirty_count, 0); pcs_mapset_limit(maps, PCS_MAP_LIMIT); - pcs_flow_table_global_init(&maps->ftab); maps->shrinker.count_objects = pcs_map_shrink_count; maps->shrinker.scan_objects = pcs_map_shrink_scan; @@ -218,7 +217,6 @@ void pcs_cc_fini(struct pcs_cluster_core *cc) BUG_ON(!list_empty(&cc->completion_queue)); BUG_ON(!list_empty(&cc->work_queue)); - pcs_flow_table_global_fini(&cc->maps.ftab); } void pcs_cc_submit(struct pcs_cluster_core *cc, struct pcs_int_request *ireq) diff --git a/fs/fuse/kio/pcs/pcs_cs.c b/fs/fuse/kio/pcs/pcs_cs.c index 8c1fdce6a2f2..738862e353d3 100644 --- a/fs/fuse/kio/pcs/pcs_cs.c +++ b/fs/fuse/kio/pcs/pcs_cs.c @@ -132,7 +132,6 @@ struct pcs_cs *pcs_cs_alloc(struct pcs_cs_set *css) cs->io_prio = -1; - INIT_LIST_HEAD(&cs->flow_lru); INIT_LIST_HEAD(&cs->bl_link); if (pcs_cs_percpu_stat_alloc(cs)) { @@ -644,8 +643,6 @@ static void complete_fo_request(struct pcs_int_request * sreq) pcs_map_put(sreq->iochunk.map); if (sreq->iochunk.csl) cslist_put(sreq->iochunk.csl); - if (sreq->iochunk.flow) - 
pcs_flow_put(sreq->iochunk.flow, &sreq->cc->maps.ftab); ireq_destroy(sreq); ireq_complete_fo(ireq); @@ -1005,9 +1002,6 @@ static void pcs_cs_isolate(struct pcs_cs *cs, struct list_head *dispose) BUG_ON(cs->nmaps); - if (!list_empty(&cs->flow_lru)) - pcs_flow_cs_unbind_all(cs); - BUG_ON(cs->nflows); } static void cs_destroy_rcu(struct rcu_head *head) diff --git a/fs/fuse/kio/pcs/pcs_cs.h b/fs/fuse/kio/pcs/pcs_cs.h index c41c5cc0075e..c77bfb5f3dc2 100644 --- a/fs/fuse/kio/pcs/pcs_cs.h +++ b/fs/fuse/kio/pcs/pcs_cs.h @@ -81,8 +81,6 @@ struct pcs_cs { u8 mds_flags; abs_time_t io_prio_stamp; - struct list_head flow_lru; - int nflows; unsigned long state; int blacklist_reason; diff --git a/fs/fuse/kio/pcs/pcs_cs_accel.c b/fs/fuse/kio/pcs/pcs_cs_accel.c index 036ca172e299..b148ed3636dc 100644 --- a/fs/fuse/kio/pcs/pcs_cs_accel.c +++ b/fs/fuse/kio/pcs/pcs_cs_accel.c @@ -1476,8 +1476,6 @@ static void complete_N_request(struct pcs_int_request * sreq) pcs_map_put(sreq->iochunk.map); if (sreq->iochunk.csl) cslist_put(sreq->iochunk.csl); - if (sreq->iochunk.flow) - pcs_flow_put(sreq->iochunk.flow, &sreq->cc->maps.ftab); ireq_destroy(sreq); csa_complete_acr(ireq); diff --git a/fs/fuse/kio/pcs/pcs_flow_detect.h b/fs/fuse/kio/pcs/pcs_flow_detect.h deleted file mode 100644 index d7fd8bce2184..000000000000 --- a/fs/fuse/kio/pcs/pcs_flow_detect.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * fs/fuse/kio/pcs/pcs_flow_detect.h - * - * Copyright (c) 2018-2021 Virtuozzo International GmbH. All rights reserved. - * - */ - -#ifndef _PCS_FLOW_DETECT_H_ -#define _PCS_FLOW_DETECT_H_ 1 - -/* TODO:!!! 
this is stump for flow_detection */ -#include "pcs_flow_detect_stub.h" - -#endif /* _PCS_FLOW_DETECT_H_ */ diff --git a/fs/fuse/kio/pcs/pcs_flow_detect_stub.h b/fs/fuse/kio/pcs/pcs_flow_detect_stub.h deleted file mode 100644 index afe7b2880877..000000000000 --- a/fs/fuse/kio/pcs/pcs_flow_detect_stub.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * fs/fuse/kio/pcs/pcs_flow_detect_stub.h - * - * Copyright (c) 2018-2021 Virtuozzo International GmbH. All rights reserved. - * - */ - -#ifndef _PCS_FLOW_DETECT_STUB_H_ -#define _PCS_FLOW_DETECT_STUB_H_ 1 - -/* TODO:!!! this is stump for flow_detection */ - -/* This should be enough for 1000 iops, otherwise lifetime is to be decreased or/and limit increased. */ -#define PCS_FLOW_LIFETIME (512) -#define PCS_FLOW_LIMIT_DFLT (512) - -#define PCS_FLOW_RECENTTIME (50) -#define PCS_FLOW_THRESH (6) - -struct pcs_flow_node -{ - int STUMB; -}; - -struct pcs_flow_table -{ - struct pcs_flow_node *STUMB; -}; - -struct pcs_flow_table_global -{ - struct pcs_flow_table *STUMB; - int nflows; -}; - -struct pcs_cs; - -static void pcs_flow_table_global_init(struct pcs_flow_table_global * gtab) __attribute__((unused)); -static void pcs_flow_table_global_fini(struct pcs_flow_table_global * gtab) __attribute__((unused)); -static void pcs_flow_table_init(struct pcs_flow_table * tab, struct pcs_flow_table_global * gtab) __attribute__((unused)); -static void pcs_flow_table_fini(struct pcs_flow_table * tab, struct pcs_flow_table_global * gtab) __attribute__((unused)); -static struct pcs_flow_node * pcs_flow_record(struct pcs_flow_table * tab, int dir, u64 start, unsigned int len, - struct pcs_flow_table_global * gtab) __attribute__((unused)); -static void pcs_flow_confirm(struct pcs_flow_node * fl, struct pcs_flow_table * tab, int dir, u64 start, unsigned int len, - struct pcs_flow_table_global * gtab) __attribute__((unused)); -static void pcs_flow_truncate(struct pcs_flow_table * tab, u64 new_size, struct pcs_flow_table_global * gtab) 
__attribute__((unused)); -static int pcs_flow_analysis(struct pcs_flow_table_global * gtab) __attribute__((unused)); -static int pcs_flow_cs_analysis(struct pcs_cs * cs) __attribute__((unused)); -static void pcs_flow_bind_cs(struct pcs_flow_node * fl, struct pcs_cs * cs) __attribute__((unused)); -static void pcs_flow_cs_unbind_all(struct pcs_cs * cs) __attribute__((unused)); -static void pcs_flow_put(struct pcs_flow_node * fl, struct pcs_flow_table_global * gtab) __attribute__((unused)); -static struct pcs_flow_node * pcs_flow_get(struct pcs_flow_node * fl) __attribute__((unused)); -static int pcs_flow_sequential(struct pcs_flow_node * fl) __attribute__((unused)); - - - - - - -static void pcs_flow_table_global_init(struct pcs_flow_table_global * gtab) {} -static void pcs_flow_table_global_fini(struct pcs_flow_table_global * gtab) {} -static void pcs_flow_table_init(struct pcs_flow_table * tab, struct pcs_flow_table_global * gtab) {} -static void pcs_flow_table_fini(struct pcs_flow_table * tab, struct pcs_flow_table_global * gtab) {} - -static struct pcs_flow_node * pcs_flow_record(struct pcs_flow_table * tab, int dir, u64 start, unsigned int len, - struct pcs_flow_table_global * gtab) -{ - return NULL; -} -static void pcs_flow_confirm(struct pcs_flow_node * fl, struct pcs_flow_table * tab, int dir, u64 start, unsigned int len, - struct pcs_flow_table_global * gtab) {} -static void pcs_flow_truncate(struct pcs_flow_table * tab, u64 new_size, struct pcs_flow_table_global * gtab) {} -static int pcs_flow_analysis(struct pcs_flow_table_global * gtab) { return 0; } -static int pcs_flow_cs_analysis(struct pcs_cs * cs) {return 0;} -static void pcs_flow_bind_cs(struct pcs_flow_node * fl, struct pcs_cs * cs) {} -static void pcs_flow_cs_unbind_all(struct pcs_cs * cs) {} - -static void pcs_flow_put(struct pcs_flow_node * fl, struct pcs_flow_table_global * gtab) {} -static struct pcs_flow_node * pcs_flow_get(struct pcs_flow_node * fl) {return NULL;} -static int 
pcs_flow_sequential(struct pcs_flow_node * fl) {return 0;} - - -#endif /* _PCS_FLOW_DETECT_STUB_H_ */ diff --git a/fs/fuse/kio/pcs/pcs_map.c b/fs/fuse/kio/pcs/pcs_map.c index 415d63b6f63f..05eb949b32e0 100644 --- a/fs/fuse/kio/pcs/pcs_map.c +++ b/fs/fuse/kio/pcs/pcs_map.c @@ -198,7 +198,6 @@ void pcs_mapping_init(struct pcs_cluster_core *cc, struct pcs_mapping * mapping) mapping->cluster = cc; INIT_RADIX_TREE(&mapping->map_tree, GFP_ATOMIC); spin_lock_init(&mapping->map_lock); - pcs_flow_table_init(&mapping->ftab, &cc->maps.ftab); } /* Must be called once right after lease is acquired. At that point we already @@ -340,7 +339,6 @@ void pcs_mapping_invalidate(struct pcs_mapping * mapping) pcs_mapping_dump(mapping); map_truncate_tail(mapping, 0); /* If some CSes are still not shutdown, we can have some map entries referenced in their queues */ - pcs_flow_table_fini(&mapping->ftab, &pcs_dentry_from_mapping(mapping)->cluster->maps.ftab); } void pcs_mapping_deinit(struct pcs_mapping * mapping) @@ -999,7 +997,6 @@ struct pcs_cs_list* cslist_alloc( struct pcs_cs_set *css, struct pcs_cs_info *re return NULL; atomic_set(&cs_list->refcnt, 1); - atomic_set(&cs_list->seq_read_in_flight, 0); cs_list->read_index = -1; cs_list->state_flags = 0; cs_list->serno = atomic64_inc_return(&css->csl_serno_gen); @@ -1592,13 +1589,6 @@ static void pcs_cs_wakeup(struct pcs_cs * cs) if (sreq->iochunk.map) pcs_map_put(sreq->iochunk.map); sreq->iochunk.map = map; - if (sreq->iochunk.flow) { - struct pcs_int_request * preq = sreq->completion_data.parent; - - pcs_flow_confirm(sreq->iochunk.flow, &map->mapping->ftab, preq->apireq.req->type == PCS_REQ_T_WRITE, - preq->apireq.req->pos, preq->apireq.req->size, - &sreq->cc->maps.ftab); - } map_submit(map, sreq); } else { map_queue_on_limit(sreq); @@ -1713,11 +1703,6 @@ void pcs_deaccount_ireq(struct pcs_int_request *ireq, pcs_error_t * err) } else { struct pcs_cs * rcs = csl->cs[ireq->iochunk.cs_index].cslink.cs; - if (ireq->flags & IREQ_F_SEQ_READ) { 
- ireq->flags &= ~IREQ_F_SEQ_READ; - if (atomic_dec_and_test(&csl->seq_read_in_flight)) - WRITE_ONCE(csl->select_stamp, jiffies); - } pcs_cs_deaccount(ireq, rcs, error); @@ -1797,7 +1782,7 @@ static unsigned int get_io_tweaks(struct pcs_cluster_core *cc) return READ_ONCE(cc->cfg.io_tweaks); } -static int select_cs_for_read(struct pcs_cluster_core *cc, struct pcs_cs_list * csl, int is_seq, unsigned int pos, PCS_NODE_ID_T banned_cs) +static int select_cs_for_read(struct pcs_cluster_core *cc, struct pcs_cs_list * csl, unsigned int pos, PCS_NODE_ID_T banned_cs) { abs_time_t now = jiffies; unsigned int local_min, remote_min, local_pipe, remote_pipe; @@ -1848,8 +1833,6 @@ static int select_cs_for_read(struct pcs_cluster_core *cc, struct pcs_cs_list * now < io_prio_stamp + PCS_CS_IO_PRIO_VALID_TIME) w = map_ioprio_to_latency(READ_ONCE(cs->io_prio)) + net_lat; - if (get_io_tweaks(cc) & PCS_TWEAK_USE_FLOW_LOAD) - w += pcs_flow_cs_analysis(cs) * 8000; if (w <= remote_min) { @@ -1885,15 +1868,6 @@ static int select_cs_for_read(struct pcs_cluster_core *cc, struct pcs_cs_list * * all of them random, which is essentially true. */ io_cost = 8000; - if (is_seq) { - int nflows = pcs_flow_analysis(&cc->maps.ftab); - - if (nflows >= PCS_FLOW_THRESH && io_locality < 0) - is_seq = 0; - - if (nflows < PCS_FLOW_THRESH) - io_cost = 500; - } if (local_idx < 0) selected = remote_idx; @@ -1905,10 +1879,7 @@ static int select_cs_for_read(struct pcs_cluster_core *cc, struct pcs_cs_list * } else if (get_io_tweaks(cc) & PCS_TWEAK_IGNORE_SEQUENTIAL) selected = remote_idx; else { - if (is_seq) - selected = local_idx; - else - selected = remote_idx; + selected = remote_idx; } /* Add penalty. The result of current decision will reflect itself in latency @@ -1916,8 +1887,7 @@ static int select_cs_for_read(struct pcs_cluster_core *cc, struct pcs_cs_list * * Ideally it should decay and be replaced with EWMA average introduced by increased latency. 
* Think about better algorithm, maybe, it is the key to finally correct algorithm. */ - if (!(get_io_tweaks(cc) & PCS_TWEAK_USE_FLOW_LOAD)) - cs_account_latency(csl->cs[selected].cslink.cs, io_cost); + cs_account_latency(csl->cs[selected].cslink.cs, io_cost); return selected; } @@ -1941,7 +1911,6 @@ pcs_ireq_split(struct pcs_int_request *ireq, unsigned int iochunk, int noalign) BUG_ON(!list_empty(&ireq->tok_list)); sreq->tok_reserved = ireq->tok_reserved; sreq->tok_serno = ireq->tok_serno; - sreq->iochunk.flow = pcs_flow_get(ireq->iochunk.flow); sreq->iochunk.cmd = ireq->iochunk.cmd; sreq->iochunk.role = ireq->iochunk.role; sreq->iochunk.cs_index = ireq->iochunk.cs_index; @@ -1997,9 +1966,6 @@ static int pcs_cslist_submit_read(struct pcs_int_request *ireq, struct pcs_cs_li unsigned int iochunk; int allot; int i = -1; - int is_seq, csl_seq = atomic_read(&csl->seq_read_in_flight); - - is_seq = csl_seq || pcs_flow_sequential(ireq->iochunk.flow); i = READ_ONCE(csl->read_index); if (i >= 0) { @@ -2016,19 +1982,18 @@ static int pcs_cslist_submit_read(struct pcs_int_request *ireq, struct pcs_cs_li * 3. No active sequential reads, sequential read from remote CS. Maybe, we want to switch to local. */ if (now > selected + PCS_MAP_MAX_REBALANCE_TIMEOUT || - (!csl_seq && ireq->iochunk.offset == 0) || + (ireq->iochunk.offset == 0) || (get_io_tweaks(cc) & PCS_TWEAK_REBALANCE_ALWAYS) || (now > selected + PCS_MAP_MIN_REBALANCE_TIMEOUT && - (!is_seq || get_io_locality(cc) < 0 || - (!csl_seq && - !(test_bit(CS_SF_LOCAL, &cs->state)) && test_bit(CSL_SF_HAS_LOCAL, &csl->state_flags))))) { + (get_io_locality(cc) < 0 || + (!(test_bit(CS_SF_LOCAL, &cs->state)) && test_bit(CSL_SF_HAS_LOCAL, &csl->state_flags))))) { i = -1; WRITE_ONCE(csl->read_index, -1); } } if (i < 0) { - i = select_cs_for_read(cc, csl, is_seq, ireq->iochunk.offset, ireq->iochunk.banned_cs); + i = select_cs_for_read(cc, csl, ireq->iochunk.offset, ireq->iochunk.banned_cs); if (i < 0) { /* All CSes are blacklisted. 
Generate error for the first one @@ -2046,9 +2011,8 @@ static int pcs_cslist_submit_read(struct pcs_int_request *ireq, struct pcs_cs_li WRITE_ONCE(csl->read_index, i); WRITE_ONCE(csl->select_stamp, jiffies); - FUSE_KTRACE(ireq->cc->fc, "Selected read map " MAP_FMT " to CS" NODE_FMT "; is_seq=%d", MAP_ARGS(ireq->iochunk.map), - NODE_ARGS(csl->cs[i].cslink.cs->id), is_seq); - pcs_flow_bind_cs(ireq->iochunk.flow, csl->cs[i].cslink.cs); + FUSE_KTRACE(ireq->cc->fc, "Selected read map " MAP_FMT " to CS" NODE_FMT, MAP_ARGS(ireq->iochunk.map), + NODE_ARGS(csl->cs[i].cslink.cs->id)); } cs = csl->cs[i].cslink.cs; @@ -2097,14 +2061,9 @@ static int pcs_cslist_submit_read(struct pcs_int_request *ireq, struct pcs_cs_li } sreq->flags &= ~(IREQ_F_RND_WEIGHT | IREQ_F_SEQ); - BUG_ON(sreq->flags & IREQ_F_SEQ_READ); if (ireq->iochunk.cmd != PCS_REQ_T_READ) { weight = PCS_CS_HOLE_WEIGHT; - } else if (pcs_flow_sequential(sreq->iochunk.flow)) { - sreq->flags |= IREQ_F_SEQ_READ | IREQ_F_SEQ; - atomic_inc(&csl->seq_read_in_flight); - weight = cong_roundup(sreq->iochunk.size); - } else if (sreq->iochunk.size >= 512*1024 || !(get_io_tweaks(cc) & PCS_TWEAK_USE_FLOW_WEIGHT)) { + } else if (sreq->iochunk.size >= 512*1024) { weight = cong_roundup(sreq->iochunk.size); } else { sreq->flags |= IREQ_F_RND_WEIGHT; @@ -2267,14 +2226,9 @@ static int pcs_cslist_submit_write(struct pcs_int_request *ireq, struct pcs_cs_l } sreq->flags &= ~(IREQ_F_RND_WEIGHT | IREQ_F_SEQ); - BUG_ON(sreq->flags & IREQ_F_SEQ_READ); if (ireq->iochunk.cmd != PCS_REQ_T_WRITE) { weight = PCS_CS_HOLE_WEIGHT; - } else if (pcs_flow_sequential(sreq->iochunk.flow)) { - weight = cong_roundup(sreq->iochunk.size); - sreq->flags |= IREQ_F_SEQ; - } else if (!(get_io_tweaks(ireq->cc) & PCS_TWEAK_USE_FLOW_WEIGHT) || - sreq->iochunk.size > 512*1024) { + } else if (sreq->iochunk.size > 512*1024) { weight = cong_roundup(sreq->iochunk.size); } else { weight = 512*1024; @@ -2550,8 +2504,6 @@ noinline void pcs_mapping_truncate(struct 
pcs_dentry_info *di, u64 new_size) di->local_mtime = get_real_time_ms(); - if (new_size < old_size) - pcs_flow_truncate(&di->mapping.ftab, new_size, &di->cluster->maps.ftab); if (old_size < new_size) offset = old_size; @@ -2904,14 +2856,6 @@ void pcs_map_verify_sync_state(struct pcs_dentry_info *di, struct pcs_int_reques } spin_unlock(&m->lock); - if (ireq->iochunk.flow) { - struct pcs_int_request * preq = ireq->completion_data.parent; - - pcs_flow_confirm(ireq->iochunk.flow, &ireq->dentry->mapping.ftab, - preq->apireq.req->type == PCS_REQ_T_WRITE, - preq->apireq.req->pos, preq->apireq.req->size, - &ireq->cc->maps.ftab); - } } diff --git a/fs/fuse/kio/pcs/pcs_map.h b/fs/fuse/kio/pcs/pcs_map.h index c31b13f05af8..1467e0167ef7 100644 --- a/fs/fuse/kio/pcs/pcs_map.h +++ b/fs/fuse/kio/pcs/pcs_map.h @@ -10,7 +10,6 @@ #include "pcs_client_types.h" #include "pcs_mds_prot.h" -#include "pcs_flow_detect.h" #include "log.h" struct pcs_dentry_info; @@ -43,8 +42,6 @@ struct pcs_int_request; #define PCS_TWEAK_REBALANCE_ALWAYS 1 #define PCS_TWEAK_IGNORE_SEQUENTIAL 2 -#define PCS_TWEAK_USE_FLOW_LOAD 4 -#define PCS_TWEAK_USE_FLOW_WEIGHT 8 struct pcs_cs_link { @@ -113,7 +110,6 @@ struct pcs_cs_list struct pcs_map_entry __rcu *map; /* Currently modified under ::map->lock */ atomic_t refcnt; - atomic_t seq_read_in_flight; int read_index; /* volatile read hint */ unsigned long blacklist; /* Atomic bit field */ abs_time_t blacklist_expires; /* volatile blacklist stamp */ diff --git a/fs/fuse/kio/pcs/pcs_req.h b/fs/fuse/kio/pcs/pcs_req.h index 4c7ee792df29..8e7bf99da805 100644 --- a/fs/fuse/kio/pcs/pcs_req.h +++ b/fs/fuse/kio/pcs/pcs_req.h @@ -115,7 +115,6 @@ struct pcs_int_request int flags; #define IREQ_F_FATAL 1 #define IREQ_F_ONCE 2 -#define IREQ_F_SEQ_READ 4 #define IREQ_F_RND_WEIGHT 8 #define IREQ_F_CACHED 0x10 #define IREQ_F_SEQ 0x20 @@ -173,7 +172,6 @@ struct pcs_int_request struct { struct pcs_map_entry *map; /* Temproraly disable flow */ - struct pcs_flow_node *flow; u8 cmd; u8 
role; short cs_index; -- 2.39.5 (Apple Git-154) _______________________________________________ Devel mailing list [email protected] https://lists.openvz.org/mailman/listinfo/devel
