--- a/drivers/md/bcache/alloc_types.h
+++ /dev/null
@@ -1,97 +0,0 @@
-#ifndef _BCACHE_ALLOC_TYPES_H
-#define _BCACHE_ALLOC_TYPES_H
-
-#include <linux/mutex.h>
-
-#include "clock_types.h"
-
-/*
- * There's two of these clocks, one for reads and one for writes:
- *
- * All fields protected by bucket_lock
- */
-struct prio_clock {
-	/*
-	 * "now" in (read/write) IO time - incremented whenever we do X amount
-	 * of reads or writes.
-	 *
-	 * Goes with the bucket read/write prios: when we read or write to a
-	 * bucket we reset the bucket's prio to the current hand; thus hand -
-	 * prio = time since bucket was last read/written.
-	 *
-	 * The units are some amount (bytes/sectors) of data read/written, and
-	 * the units can change on the fly if we need to rescale to fit
-	 * everything in a u16 - your only guarantee is that the units are
-	 * consistent.
-	 */
-	u16			hand;
-	u16			min_prio;
-
-	int			rw;
-
-	struct io_timer		rescale;
-};
-
-/* There is one reserve for each type of btree, one for prios and gens
- * and one for moving GC */
-enum alloc_reserve {
-	RESERVE_PRIO,
-	RESERVE_BTREE,
-	RESERVE_METADATA_LAST = RESERVE_BTREE,
-	RESERVE_MOVINGGC,
-
-	RESERVE_NONE,
-	RESERVE_NR,
-};
-
-static inline bool allocation_is_metadata(enum alloc_reserve id)
-{
-	return id <= RESERVE_METADATA_LAST;
-}
-
-/* Enough for 16 cache devices, 2 tiers and some left over for pipelining */
-#define OPEN_BUCKETS_COUNT	256
-
-#define WRITE_POINT_COUNT	16
-
-struct open_bucket {
-	struct list_head	list;
-	struct mutex		lock;
-	atomic_t		pin;
-	unsigned		sectors_free;
-	unsigned		nr_ptrs;
-	struct bch_extent_ptr	ptrs[BKEY_EXTENT_PTRS_MAX];
-};
-
-struct write_point {
-	struct open_bucket	*b;
-
-	/*
-	 * Throttle writes to this write point if tier 0 is full?
-	 */
-	bool			throttle;
-
-	/*
-	 * If 0, use the desired replica count for the cache set.
-	 * Otherwise, this is the number of replicas desired (generally 1).
-	 */
-	unsigned		nr_replicas;
-
-	/*
-	 * Bucket reserve to allocate from.
-	 */
-	enum alloc_reserve	reserve;
-
-	/*
-	 * If not NULL, cache group for tiering, promotion and moving GC -
-	 * always allocates a single replica
-	 */
-	struct cache_group	*group;
-
-	/*
-	 * Otherwise do a normal replicated bucket allocation that could come
-	 * from any device in tier 0 (foreground write)
-	 */
-};
-
-#endif /* _BCACHE_ALLOC_TYPES_H */
--- a/drivers/md/bcache/clock_types.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef _BCACHE_CLOCK_TYPES_H
-#define _BCACHE_CLOCK_TYPES_H
-
-#define NR_IO_TIMERS		8
-
-/*
- * Clocks/timers in units of sectors of IO:
- *
- * Note - they use percpu batching, so they're only approximate.
- */
-
-struct io_timer;
-typedef void (*io_timer_fn)(struct io_timer *);
-
-struct io_timer {
-	io_timer_fn		fn;
-	unsigned long		expire;
-};
-
-/* Amount to buffer up on a percpu counter */
-#define IO_CLOCK_PCPU_SECTORS	128
-
-struct io_clock {
-	atomic_long_t		now;
-	u16 __percpu		*pcpu_buf;
-
-	spinlock_t		timer_lock;
-	DECLARE_HEAP(struct io_timer *, timers);
-};
-
-#endif /* _BCACHE_CLOCK_TYPES_H */
-
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -1649,6 +1649,7 @@
 	 * which was set by bch_read_fn().
 	 */
 	//inode = bbio->key.k.p.inode;
+	inode = 0;
 	parent = bio->bi_private;
 
 	bch_bbio_reset(bbio);
--- /dev/null
+++ b/include/trace/events/alloc_types.h
@@ -0,0 +1,97 @@
+#ifndef _BCACHE_ALLOC_TYPES_H
+#define _BCACHE_ALLOC_TYPES_H
+
+#include <linux/mutex.h>
+
+#include "clock_types.h"
+
+/*
+ * There are two of these clocks, one for reads and one for writes:
+ *
+ * All fields protected by bucket_lock
+ */
+struct prio_clock {
+	/*
+	 * "now" in (read/write) IO time - incremented whenever we do X amount
+	 * of reads or writes.
+	 *
+	 * Goes with the bucket read/write prios: when we read or write to a
+	 * bucket we reset the bucket's prio to the current hand; thus hand -
+	 * prio = time since bucket was last read/written.
+	 *
+	 * The units are some amount (bytes/sectors) of data read/written, and
+	 * the units can change on the fly if we need to rescale to fit
+	 * everything in a u16 - your only guarantee is that the units are
+	 * consistent.
+	 */
+	u16			hand;
+	u16			min_prio;
+
+	int			rw;
+
+	struct io_timer		rescale;
+};
+
+/* There is one reserve for each type of btree, one for prios and gens
+ * and one for moving GC */
+enum alloc_reserve {
+	RESERVE_PRIO,
+	RESERVE_BTREE,
+	RESERVE_METADATA_LAST = RESERVE_BTREE,
+	RESERVE_MOVINGGC,
+
+	RESERVE_NONE,
+	RESERVE_NR,
+};
+
+static inline bool allocation_is_metadata(enum alloc_reserve id)
+{
+	return id <= RESERVE_METADATA_LAST;
+}
+
+/* Enough for 16 cache devices, 2 tiers and some left over for pipelining */
+#define OPEN_BUCKETS_COUNT	256
+
+#define WRITE_POINT_COUNT	16
+
+struct open_bucket {
+	struct list_head	list;
+	struct mutex		lock;
+	atomic_t		pin;
+	unsigned		sectors_free;
+	unsigned		nr_ptrs;
+	struct bch_extent_ptr	ptrs[BKEY_EXTENT_PTRS_MAX];
+};
+
+struct write_point {
+	struct open_bucket	*b;
+
+	/*
+	 * Throttle writes to this write point if tier 0 is full?
+	 */
+	bool			throttle;
+
+	/*
+	 * If 0, use the desired replica count for the cache set.
+	 * Otherwise, this is the number of replicas desired (generally 1).
+	 */
+	unsigned		nr_replicas;
+
+	/*
+	 * Bucket reserve to allocate from.
+	 */
+	enum alloc_reserve	reserve;
+
+	/*
+	 * If not NULL, cache group for tiering, promotion and moving GC -
+	 * always allocates a single replica
+	 */
+	struct cache_group	*group;
+
+	/*
+	 * Otherwise do a normal replicated bucket allocation that could come
+	 * from any device in tier 0 (foreground write)
+	 */
+};
+
+#endif /* _BCACHE_ALLOC_TYPES_H */
--- /dev/null
+++ b/include/trace/events/clock_types.h
@@ -0,0 +1,31 @@
+#ifndef _BCACHE_CLOCK_TYPES_H
+#define _BCACHE_CLOCK_TYPES_H
+
+#define NR_IO_TIMERS		8
+
+/*
+ * Clocks/timers in units of sectors of IO:
+ *
+ * Note - they use percpu batching, so they're only approximate.
+ */
+
+struct io_timer;
+typedef void (*io_timer_fn)(struct io_timer *);
+
+struct io_timer {
+	io_timer_fn		fn;
+	unsigned long		expire;
+};
+
+/* Amount to buffer up on a percpu counter */
+#define IO_CLOCK_PCPU_SECTORS	128
+
+struct io_clock {
+	atomic_long_t		now;
+	u16 __percpu		*pcpu_buf;
+
+	spinlock_t		timer_lock;
+	DECLARE_HEAP(struct io_timer *, timers);
+};
+
+#endif /* _BCACHE_CLOCK_TYPES_H */
--- a/drivers/md/bcache/alloc.h
+++ b/drivers/md/bcache/alloc.h
@@ -1,7 +1,7 @@
 #ifndef _BCACHE_ALLOC_H
 #define _BCACHE_ALLOC_H
 
-#include "alloc_types.h"
+#include <trace/events/alloc_types.h>
 
 struct bkey;
 struct bucket;
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -207,10 +207,10 @@
 #define bch_meta_write_fault(name)					\
 	 dynamic_fault("bcache:meta:write:" name)
 
-#include "alloc_types.h"
+#include <trace/events/alloc_types.h>
 #include "blockdev_types.h"
 #include "buckets_types.h"
-#include "clock_types.h"
+#include <trace/events/clock_types.h>
 #include "io_types.h"
 #include "journal_types.h"
 #include "keylist_types.h"
