Diffstat (limited to 'include')
-rw-r--r--  include/linux/acpi_dma.h  116
-rw-r--r--  include/linux/aio.h  178
-rw-r--r--  include/linux/backing-dev.h  16
-rw-r--r--  include/linux/bio.h  115
-rw-r--r--  include/linux/blk_types.h  5
-rw-r--r--  include/linux/blkdev.h  31
-rw-r--r--  include/linux/cgroup.h  1
-rw-r--r--  include/linux/cgroup_subsys.h  6
-rw-r--r--  include/linux/cpu_cooling.h  25
-rw-r--r--  include/linux/dmaengine.h  15
-rw-r--r--  include/linux/drbd.h  5
-rw-r--r--  include/linux/drbd_limits.h  11
-rw-r--r--  include/linux/errno.h  1
-rw-r--r--  include/linux/f2fs_fs.h  17
-rw-r--r--  include/linux/fs.h  2
-rw-r--r--  include/linux/gpio.h  6
-rw-r--r--  include/linux/hugetlb.h  19
-rw-r--r--  include/linux/idr.h  10
-rw-r--r--  include/linux/kref.h  9
-rw-r--r--  include/linux/lru_cache.h  1
-rw-r--r--  include/linux/mlx4/device.h  104
-rw-r--r--  include/linux/mlx4/srq.h  2
-rw-r--r--  include/linux/mm.h  20
-rw-r--r--  include/linux/mtd/blktrans.h  2
-rw-r--r--  include/linux/mtd/mtd.h  8
-rw-r--r--  include/linux/mtd/nand.h  121
-rw-r--r--  include/linux/mtd/physmap.h  2
-rw-r--r--  include/linux/mtd/plat-ram.h  4
-rw-r--r--  include/linux/of_dma.h  10
-rw-r--r--  include/linux/pid_namespace.h  1
-rw-r--r--  include/linux/platform_data/elm.h  2
-rw-r--r--  include/linux/random.h  7
-rw-r--r--  include/linux/rwsem.h  10
-rw-r--r--  include/linux/sched.h  6
-rw-r--r--  include/linux/sudmac.h  52
-rw-r--r--  include/linux/thermal.h  15
-rw-r--r--  include/linux/wait.h  86
-rw-r--r--  include/linux/writeback.h  1
-rw-r--r--  include/trace/events/bcache.h  271
-rw-r--r--  include/trace/events/block.h  12
-rw-r--r--  include/trace/events/f2fs.h  682
-rw-r--r--  include/trace/events/writeback.h  5
42 files changed, 1637 insertions, 375 deletions
diff --git a/include/linux/acpi_dma.h b/include/linux/acpi_dma.h
new file mode 100644
index 000000000000..d09deabc7bf6
--- /dev/null
+++ b/include/linux/acpi_dma.h
@@ -0,0 +1,116 @@
+/*
+ * ACPI helpers for DMA request / controller
+ *
+ * Based on of_dma.h
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_ACPI_DMA_H
+#define __LINUX_ACPI_DMA_H
+
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+
+/**
+ * struct acpi_dma_spec - slave device DMA resources
+ * @chan_id: channel unique id
+ * @slave_id: request line unique id
+ * @dev: struct device of the DMA controller to be used in the filter
+ * function
+ */
+struct acpi_dma_spec {
+ int chan_id;
+ int slave_id;
+ struct device *dev;
+};
+
+/**
+ * struct acpi_dma - representation of the registered DMAC
+ * @dma_controllers: linked list node
+ * @dev: struct device of this controller
+ * @acpi_dma_xlate: callback function to find a suitable channel
+ * @data: private data used by a callback function
+ */
+struct acpi_dma {
+ struct list_head dma_controllers;
+ struct device *dev;
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *);
+ void *data;
+};
+
+/* Used with acpi_dma_simple_xlate() */
+struct acpi_dma_filter_info {
+ dma_cap_mask_t dma_cap;
+ dma_filter_fn filter_fn;
+};
+
+#ifdef CONFIG_DMA_ACPI
+
+int acpi_dma_controller_register(struct device *dev,
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *),
+ void *data);
+int acpi_dma_controller_free(struct device *dev);
+int devm_acpi_dma_controller_register(struct device *dev,
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *),
+ void *data);
+void devm_acpi_dma_controller_free(struct device *dev);
+
+struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
+ size_t index);
+struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
+ const char *name);
+
+struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec,
+ struct acpi_dma *adma);
+#else
+
+static inline int acpi_dma_controller_register(struct device *dev,
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *),
+ void *data)
+{
+ return -ENODEV;
+}
+static inline int acpi_dma_controller_free(struct device *dev)
+{
+ return -ENODEV;
+}
+static inline int devm_acpi_dma_controller_register(struct device *dev,
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *),
+ void *data)
+{
+ return -ENODEV;
+}
+static inline void devm_acpi_dma_controller_free(struct device *dev)
+{
+}
+
+static inline struct dma_chan *acpi_dma_request_slave_chan_by_index(
+ struct device *dev, size_t index)
+{
+ return NULL;
+}
+static inline struct dma_chan *acpi_dma_request_slave_chan_by_name(
+ struct device *dev, const char *name)
+{
+ return NULL;
+}
+
+#define acpi_dma_simple_xlate NULL
+
+#endif
+
+#define acpi_dma_request_slave_channel acpi_dma_request_slave_chan_by_index
+
+#endif /* __LINUX_ACPI_DMA_H */
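A minimal usage sketch of the API above (not part of this patch); the xyz_* names and the trivial filter are hypothetical:

#include <linux/acpi_dma.h>
#include <linux/platform_device.h>

/* hypothetical filter: accept any channel offered by the DMA core */
static bool xyz_dma_filter(struct dma_chan *chan, void *param)
{
	return true;
}

static int xyz_dma_probe(struct platform_device *pdev)
{
	static struct acpi_dma_filter_info info;

	dma_cap_zero(info.dma_cap);
	dma_cap_set(DMA_SLAVE, info.dma_cap);
	info.filter_fn = xyz_dma_filter;

	/* the devm_ variant unregisters automatically on driver detach */
	return devm_acpi_dma_controller_register(&pdev->dev,
						 acpi_dma_simple_xlate,
						 &info);
}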
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 31ff6dba4872..1bdf965339f9 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -9,91 +9,32 @@
#include <linux/atomic.h>
-#define AIO_MAXSEGS 4
-#define AIO_KIOGRP_NR_ATOMIC 8
-
struct kioctx;
+struct kiocb;
-/* Notes on cancelling a kiocb:
- * If a kiocb is cancelled, aio_complete may return 0 to indicate
- * that cancel has not yet disposed of the kiocb. All cancel
- * operations *must* call aio_put_req to dispose of the kiocb
- * to guard against races with the completion code.
- */
-#define KIOCB_C_CANCELLED 0x01
-#define KIOCB_C_COMPLETE 0x02
-
-#define KIOCB_SYNC_KEY (~0U)
+#define KIOCB_KEY 0
-/* ki_flags bits */
/*
- * This may be used for cancel/retry serialization in the future, but
- * for now it's unused and we probably don't want modules to even
- * think they can use it.
+ * We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
+ * cancelled or completed (this makes a certain amount of sense because
+ * successful cancellation - io_cancel() - does deliver the completion to
+ * userspace).
+ *
+ * And since most things don't implement kiocb cancellation and we'd really like
+ * kiocb completion to be lockless when possible, we use ki_cancel to
+ * synchronize cancellation and completion - we only set it to KIOCB_CANCELLED
+ * with xchg() or cmpxchg(), see batch_complete_aio() and kiocb_cancel().
*/
-/* #define KIF_LOCKED 0 */
-#define KIF_KICKED 1
-#define KIF_CANCELLED 2
-
-#define kiocbTryLock(iocb) test_and_set_bit(KIF_LOCKED, &(iocb)->ki_flags)
-#define kiocbTryKick(iocb) test_and_set_bit(KIF_KICKED, &(iocb)->ki_flags)
+#define KIOCB_CANCELLED ((void *) (~0ULL))
-#define kiocbSetLocked(iocb) set_bit(KIF_LOCKED, &(iocb)->ki_flags)
-#define kiocbSetKicked(iocb) set_bit(KIF_KICKED, &(iocb)->ki_flags)
-#define kiocbSetCancelled(iocb) set_bit(KIF_CANCELLED, &(iocb)->ki_flags)
+typedef int (kiocb_cancel_fn)(struct kiocb *, struct io_event *);
-#define kiocbClearLocked(iocb) clear_bit(KIF_LOCKED, &(iocb)->ki_flags)
-#define kiocbClearKicked(iocb) clear_bit(KIF_KICKED, &(iocb)->ki_flags)
-#define kiocbClearCancelled(iocb) clear_bit(KIF_CANCELLED, &(iocb)->ki_flags)
-
-#define kiocbIsLocked(iocb) test_bit(KIF_LOCKED, &(iocb)->ki_flags)
-#define kiocbIsKicked(iocb) test_bit(KIF_KICKED, &(iocb)->ki_flags)
-#define kiocbIsCancelled(iocb) test_bit(KIF_CANCELLED, &(iocb)->ki_flags)
-
-/* is there a better place to document function pointer methods? */
-/**
- * ki_retry - iocb forward progress callback
- * @kiocb: The kiocb struct to advance by performing an operation.
- *
- * This callback is called when the AIO core wants a given AIO operation
- * to make forward progress. The kiocb argument describes the operation
- * that is to be performed. As the operation proceeds, perhaps partially,
- * ki_retry is expected to update the kiocb with progress made. Typically
- * ki_retry is set in the AIO core and it itself calls file_operations
- * helpers.
- *
- * ki_retry's return value determines when the AIO operation is completed
- * and an event is generated in the AIO event ring. Except the special
- * return values described below, the value that is returned from ki_retry
- * is transferred directly into the completion ring as the operation's
- * resulting status. Once this has happened ki_retry *MUST NOT* reference
- * the kiocb pointer again.
- *
- * If ki_retry returns -EIOCBQUEUED it has made a promise that aio_complete()
- * will be called on the kiocb pointer in the future. The AIO core will
- * not ask the method again -- ki_retry must ensure forward progress.
- * aio_complete() must be called once and only once in the future, multiple
- * calls may result in undefined behaviour.
- *
- * If ki_retry returns -EIOCBRETRY it has made a promise that kick_iocb()
- * will be called on the kiocb pointer in the future. This may happen
- * through generic helpers that associate kiocb->ki_wait with a wait
- * queue head that ki_retry uses via current->io_wait. It can also happen
- * with custom tracking and manual calls to kick_iocb(), though that is
- * discouraged. In either case, kick_iocb() must be called once and only
- * once. ki_retry must ensure forward progress, the AIO core will wait
- * indefinitely for kick_iocb() to be called.
- */
struct kiocb {
- struct list_head ki_run_list;
- unsigned long ki_flags;
- int ki_users;
- unsigned ki_key; /* id of this request */
+ atomic_t ki_users;
struct file *ki_filp;
- struct kioctx *ki_ctx; /* may be NULL for sync ops */
- int (*ki_cancel)(struct kiocb *, struct io_event *);
- ssize_t (*ki_retry)(struct kiocb *);
+ struct kioctx *ki_ctx; /* NULL for sync ops */
+ kiocb_cancel_fn *ki_cancel;
void (*ki_dtor)(struct kiocb *);
union {
@@ -117,7 +58,6 @@ struct kiocb {
struct list_head ki_list; /* the aio core uses this
* for cancellation */
- struct list_head ki_batch; /* batch allocation */
/*
* If the aio_resfd field of the userspace iocb is not zero,
@@ -128,106 +68,40 @@ struct kiocb {
static inline bool is_sync_kiocb(struct kiocb *kiocb)
{
- return kiocb->ki_key == KIOCB_SYNC_KEY;
+ return kiocb->ki_ctx == NULL;
}
static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
*kiocb = (struct kiocb) {
- .ki_users = 1,
- .ki_key = KIOCB_SYNC_KEY,
+ .ki_users = ATOMIC_INIT(1),
+ .ki_ctx = NULL,
.ki_filp = filp,
.ki_obj.tsk = current,
};
}
-#define AIO_RING_MAGIC 0xa10a10a1
-#define AIO_RING_COMPAT_FEATURES 1
-#define AIO_RING_INCOMPAT_FEATURES 0
-struct aio_ring {
- unsigned id; /* kernel internal index number */
- unsigned nr; /* number of io_events */
- unsigned head;
- unsigned tail;
-
- unsigned magic;
- unsigned compat_features;
- unsigned incompat_features;
- unsigned header_length; /* size of aio_ring */
-
-
- struct io_event io_events[0];
-}; /* 128 bytes + ring size */
-
-#define AIO_RING_PAGES 8
-struct aio_ring_info {
- unsigned long mmap_base;
- unsigned long mmap_size;
-
- struct page **ring_pages;
- spinlock_t ring_lock;
- long nr_pages;
-
- unsigned nr, tail;
-
- struct page *internal_pages[AIO_RING_PAGES];
-};
-
-static inline unsigned aio_ring_avail(struct aio_ring_info *info,
- struct aio_ring *ring)
-{
- return (ring->head + info->nr - 1 - ring->tail) % info->nr;
-}
-
-struct kioctx {
- atomic_t users;
- int dead;
- struct mm_struct *mm;
-
- /* This needs improving */
- unsigned long user_id;
- struct hlist_node list;
-
- wait_queue_head_t wait;
-
- spinlock_t ctx_lock;
-
- int reqs_active;
- struct list_head active_reqs; /* used for cancellation */
- struct list_head run_list; /* used for kicked reqs */
-
- /* sys_io_setup currently limits this to an unsigned int */
- unsigned max_reqs;
-
- struct aio_ring_info ring_info;
-
- struct delayed_work wq;
-
- struct rcu_head rcu_head;
-};
-
/* prototypes */
-extern unsigned aio_max_size;
-
#ifdef CONFIG_AIO
extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
-extern int aio_put_req(struct kiocb *iocb);
-extern void kick_iocb(struct kiocb *iocb);
-extern int aio_complete(struct kiocb *iocb, long res, long res2);
+extern void aio_put_req(struct kiocb *iocb);
+extern void aio_complete(struct kiocb *iocb, long res, long res2);
struct mm_struct;
extern void exit_aio(struct mm_struct *mm);
extern long do_io_submit(aio_context_t ctx_id, long nr,
struct iocb __user *__user *iocbpp, bool compat);
+void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
#else
static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
-static inline int aio_put_req(struct kiocb *iocb) { return 0; }
-static inline void kick_iocb(struct kiocb *iocb) { }
-static inline int aio_complete(struct kiocb *iocb, long res, long res2) { return 0; }
+static inline void aio_put_req(struct kiocb *iocb) { }
+static inline void aio_complete(struct kiocb *iocb, long res, long res2) { }
struct mm_struct;
static inline void exit_aio(struct mm_struct *mm) { }
static inline long do_io_submit(aio_context_t ctx_id, long nr,
struct iocb __user * __user *iocbpp,
bool compat) { return 0; }
+static inline void kiocb_set_cancel_fn(struct kiocb *req,
+ kiocb_cancel_fn *cancel) { }
#endif /* CONFIG_AIO */
static inline struct kiocb *list_kiocb(struct list_head *h)
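A brief sketch of the cancellation contract described in the comment above: a driver that can cancel in-flight requests installs a callback with kiocb_set_cancel_fn(); the core claims the kiocb by cmpxchg()ing ki_cancel to KIOCB_CANCELLED before invoking it. The xyz_* names are hypothetical:

/* hypothetical cancel callback: by the time this runs, ki_cancel has
 * already been swapped to KIOCB_CANCELLED, so it cannot race with a
 * second cancel or with lockless completion */
static int xyz_aio_cancel(struct kiocb *iocb, struct io_event *ev)
{
	/* tear down the driver's pending work for iocb here */
	return 0;
}

static void xyz_aio_submit(struct kiocb *iocb)
{
	kiocb_set_cancel_fn(iocb, xyz_aio_cancel);
	/* queue the I/O; completion must call aio_complete() exactly once */
}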
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 350459910fe1..c3881553f7d1 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -18,6 +18,7 @@
#include <linux/writeback.h>
#include <linux/atomic.h>
#include <linux/sysctl.h>
+#include <linux/workqueue.h>
struct page;
struct device;
@@ -27,7 +28,6 @@ struct dentry;
* Bits in backing_dev_info.state
*/
enum bdi_state {
- BDI_pending, /* On its way to being activated */
BDI_wb_alloc, /* Default embedded wb allocated */
BDI_async_congested, /* The async (write) queue is getting full */
BDI_sync_congested, /* The sync queue is getting full */
@@ -53,10 +53,8 @@ struct bdi_writeback {
unsigned int nr;
unsigned long last_old_flush; /* last old data flush */
- unsigned long last_active; /* last time bdi thread was active */
- struct task_struct *task; /* writeback thread */
- struct timer_list wakeup_timer; /* used for delayed bdi thread wakeup */
+ struct delayed_work dwork; /* work item used for writeback */
struct list_head b_dirty; /* dirty inodes */
struct list_head b_io; /* parked for writeback */
struct list_head b_more_io; /* parked for more writeback */
@@ -123,14 +121,15 @@ int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
enum wb_reason reason);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
-int bdi_writeback_thread(void *data);
+void bdi_writeback_workfn(struct work_struct *work);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2);
extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
-extern struct list_head bdi_pending_list;
+
+extern struct workqueue_struct *bdi_wq;
static inline int wb_has_dirty_io(struct bdi_writeback *wb)
{
@@ -336,11 +335,6 @@ static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}
-static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
-{
- return bdi == &default_backing_dev_info;
-}
-
static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
return bdi_cap_writeback_dirty(mapping->backing_dev_info);
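With the per-bdi writeback thread replaced by a delayed work item, a delayed wakeup reduces to re-arming dwork on bdi_wq; a sketch, assuming the usual dirty_writeback_interval (centisecs) timeout:

#include <linux/backing-dev.h>
#include <linux/writeback.h>

static void xyz_wakeup_delayed(struct backing_dev_info *bdi)
{
	unsigned long timeout = msecs_to_jiffies(dirty_writeback_interval * 10);

	/* re-arm the writeback work item on the shared bdi workqueue */
	mod_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
}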
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 820e7aaad4fd..ef24466d8f82 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -67,6 +67,7 @@
#define bio_offset(bio) bio_iovec((bio))->bv_offset
#define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio) ((bio)->bi_size >> 9)
+#define bio_end_sector(bio) ((bio)->bi_sector + bio_sectors((bio)))
static inline unsigned int bio_cur_bytes(struct bio *bio)
{
@@ -84,11 +85,6 @@ static inline void *bio_data(struct bio *bio)
return NULL;
}
-static inline int bio_has_allocated_vec(struct bio *bio)
-{
- return bio->bi_io_vec && bio->bi_io_vec != bio->bi_inline_vecs;
-}
-
/*
* will die
*/
@@ -136,16 +132,27 @@ static inline int bio_has_allocated_vec(struct bio *bio)
#define bio_io_error(bio) bio_endio((bio), -EIO)
/*
- * drivers should not use the __ version unless they _really_ want to
- * run through the entire bio and not just pending pieces
+ * drivers should not use the __ version unless they _really_ know what
+ * they're doing
*/
#define __bio_for_each_segment(bvl, bio, i, start_idx) \
for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx); \
i < (bio)->bi_vcnt; \
bvl++, i++)
+/*
+ * drivers should _never_ use the all version - the bio may have been split
+ * before it got to the driver and the driver won't own all of it
+ */
+#define bio_for_each_segment_all(bvl, bio, i) \
+ for (i = 0; \
+ bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt; \
+ i++)
+
#define bio_for_each_segment(bvl, bio, i) \
- __bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)
+ for (i = (bio)->bi_idx; \
+ bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt; \
+ i++)
/*
* get a reference to a bio, so it won't disappear. the intended use is
@@ -180,9 +187,12 @@ struct bio_integrity_payload {
unsigned short bip_slab; /* slab the bip came from */
unsigned short bip_vcnt; /* # of integrity bio_vecs */
unsigned short bip_idx; /* current bip_vec index */
+ unsigned bip_owns_buf:1; /* should free bip_buf */
struct work_struct bip_work; /* I/O completion */
- struct bio_vec bip_vec[0]; /* embedded bvec array */
+
+ struct bio_vec *bip_vec;
+ struct bio_vec bip_inline_vecs[0];/* embedded bvec array */
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */
@@ -211,6 +221,7 @@ extern void bio_pair_release(struct bio_pair *dbio);
extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
+extern mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries);
extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);
@@ -245,6 +256,9 @@ extern void bio_endio(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);
+extern int submit_bio_wait(int rw, struct bio *bio);
+extern void bio_advance(struct bio *, unsigned);
+
extern void bio_init(struct bio *);
extern void bio_reset(struct bio *);
@@ -279,6 +293,9 @@ static inline void bio_flush_dcache_pages(struct bio *bi)
}
#endif
+extern void bio_copy_data(struct bio *dst, struct bio *src);
+extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
+
extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
unsigned long, unsigned int, int, gfp_t);
extern struct bio *bio_copy_user_iov(struct request_queue *,
@@ -286,8 +303,8 @@ extern struct bio *bio_copy_user_iov(struct request_queue *,
int, int, gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
-extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
-extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
+extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
+extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
#ifdef CONFIG_BLK_CGROUP
@@ -298,39 +315,6 @@ static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
#endif /* CONFIG_BLK_CGROUP */
-/*
- * bio_set is used to allow other portions of the IO system to
- * allocate their own private memory pools for bio and iovec structures.
- * These memory pools in turn all allocate from the bio_slab
- * and the bvec_slabs[].
- */
-#define BIO_POOL_SIZE 2
-#define BIOVEC_NR_POOLS 6
-#define BIOVEC_MAX_IDX (BIOVEC_NR_POOLS - 1)
-
-struct bio_set {
- struct kmem_cache *bio_slab;
- unsigned int front_pad;
-
- mempool_t *bio_pool;
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
- mempool_t *bio_integrity_pool;
-#endif
- mempool_t *bvec_pool;
-};
-
-struct biovec_slab {
- int nr_vecs;
- char *name;
- struct kmem_cache *slab;
-};
-
-/*
- * a small number of entries is fine, not going to be performance critical.
- * basically we just need to survive
- */
-#define BIO_SPLIT_ENTRIES 2
-
#ifdef CONFIG_HIGHMEM
/*
* remember never ever reenable interrupts between a bvec_kmap_irq and
@@ -527,6 +511,49 @@ static inline struct bio *bio_list_get(struct bio_list *bl)
return bio;
}
+/*
+ * bio_set is used to allow other portions of the IO system to
+ * allocate their own private memory pools for bio and iovec structures.
+ * These memory pools in turn all allocate from the bio_slab
+ * and the bvec_slabs[].
+ */
+#define BIO_POOL_SIZE 2
+#define BIOVEC_NR_POOLS 6
+#define BIOVEC_MAX_IDX (BIOVEC_NR_POOLS - 1)
+
+struct bio_set {
+ struct kmem_cache *bio_slab;
+ unsigned int front_pad;
+
+ mempool_t *bio_pool;
+ mempool_t *bvec_pool;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+ mempool_t *bio_integrity_pool;
+ mempool_t *bvec_integrity_pool;
+#endif
+
+ /*
+ * Deadlock avoidance for stacking block drivers: see comments in
+ * bio_alloc_bioset() for details
+ */
+ spinlock_t rescue_lock;
+ struct bio_list rescue_list;
+ struct work_struct rescue_work;
+ struct workqueue_struct *rescue_workqueue;
+};
+
+struct biovec_slab {
+ int nr_vecs;
+ char *name;
+ struct kmem_cache *slab;
+};
+
+/*
+ * a small number of entries is fine, not going to be performance critical.
+ * basically we just need to survive
+ */
+#define BIO_SPLIT_ENTRIES 2
+
#if defined(CONFIG_BLK_DEV_INTEGRITY)
#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)]))
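A sketch contrasting the two iterators above: bio_for_each_segment_all() walks every bvec the bio owns (only safe for the bio's owner, e.g. on completion), while bio_for_each_segment() starts at bi_idx and covers only the unprocessed part:

static unsigned xyz_count_bytes(struct bio *bio)
{
	struct bio_vec *bvec;
	unsigned bytes = 0;
	int i;

	bio_for_each_segment_all(bvec, bio, i)	/* all segments, from index 0 */
		bytes += bvec->bv_len;
	return bytes;
}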
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 22990cf4439d..fa1abeb45b76 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -118,6 +118,7 @@ struct bio {
* BIO_POOL_IDX()
*/
#define BIO_RESET_BITS 13
+#define BIO_OWNS_VEC 13 /* bio_free() should free bvec */
#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
@@ -176,6 +177,7 @@ enum rq_flag_bits {
__REQ_IO_STAT, /* account I/O stat */
__REQ_MIXED_MERGE, /* merge of different types, fail separately */
__REQ_KERNEL, /* direct IO to kernel pages */
+ __REQ_PM, /* runtime pm request */
__REQ_NR_BITS, /* stops here */
};
@@ -198,6 +200,8 @@ enum rq_flag_bits {
REQ_SECURE)
#define REQ_CLONE_MASK REQ_COMMON_MASK
+#define BIO_NO_ADVANCE_ITER_MASK (REQ_DISCARD|REQ_WRITE_SAME)
+
/* This mask is used for both bio and request merge checking */
#define REQ_NOMERGE_FLAGS \
(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
@@ -224,5 +228,6 @@ enum rq_flag_bits {
#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE)
#define REQ_SECURE (1 << __REQ_SECURE)
#define REQ_KERNEL (1 << __REQ_KERNEL)
+#define REQ_PM (1 << __REQ_PM)
#endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 78feda9bbae2..2fdb4a451b49 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -361,6 +361,12 @@ struct request_queue {
*/
struct kobject kobj;
+#ifdef CONFIG_PM_RUNTIME
+ struct device *dev;
+ int rpm_status;
+ unsigned int nr_pending;
+#endif
+
/*
* queue settings
*/
@@ -838,7 +844,7 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
unsigned int cmd_flags)
{
if (unlikely(cmd_flags & REQ_DISCARD))
- return q->limits.max_discard_sectors;
+ return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
if (unlikely(cmd_flags & REQ_WRITE_SAME))
return q->limits.max_write_same_sectors;
@@ -961,6 +967,27 @@ struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
/*
+ * block layer runtime pm functions
+ */
+#ifdef CONFIG_PM_RUNTIME
+extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
+extern int blk_pre_runtime_suspend(struct request_queue *q);
+extern void blk_post_runtime_suspend(struct request_queue *q, int err);
+extern void blk_pre_runtime_resume(struct request_queue *q);
+extern void blk_post_runtime_resume(struct request_queue *q, int err);
+#else
+static inline void blk_pm_runtime_init(struct request_queue *q,
+ struct device *dev) {}
+static inline int blk_pre_runtime_suspend(struct request_queue *q)
+{
+ return -ENOSYS;
+}
+static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
+static inline void blk_pre_runtime_resume(struct request_queue *q) {}
+static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
+#endif
+
+/*
* blk_plug permits building a queue of related requests by holding the I/O
* fragments for a short period. This allows merging of sequential requests
* into single larger request. As the requests are moved from a per-task list to
@@ -1484,7 +1511,7 @@ static inline bool blk_integrity_is_initialized(struct gendisk *g)
struct block_device_operations {
int (*open) (struct block_device *, fmode_t);
- int (*release) (struct gendisk *, fmode_t);
+ void (*release) (struct gendisk *, fmode_t);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*direct_access) (struct block_device *, sector_t,
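A sketch of the new runtime-PM hooks in a hypothetical driver: blk_pm_runtime_init(q, dev) is called once at probe time, and the runtime-suspend callback then brackets the hardware transition with the pre/post helpers:

static int xyz_runtime_suspend(struct device *dev)
{
	struct request_queue *q = dev_get_drvdata(dev);	/* hypothetical */
	int err;

	err = blk_pre_runtime_suspend(q);
	if (err)			/* e.g. requests still pending */
		return err;
	err = 0;			/* hypothetical hardware suspend */
	blk_post_runtime_suspend(q, err);
	return err;
}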
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 3bff9ce09cf7..5047355b9a0f 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -28,6 +28,7 @@ struct cgroup_subsys;
struct inode;
struct cgroup;
struct css_id;
+struct eventfd_ctx;
extern int cgroup_init_early(void);
extern int cgroup_init(void);
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index f204a7a9cf38..6e7ec64b69ab 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -78,3 +78,9 @@ SUBSYS(hugetlb)
#endif
/* */
+
+#ifdef CONFIG_CGROUP_BCACHE
+SUBSYS(bcache)
+#endif
+
+/* */
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index 40b4ef54cc7d..282e27028418 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -25,34 +25,39 @@
#define __CPU_COOLING_H__
#include <linux/thermal.h>
+#include <linux/cpumask.h>
-#define CPUFREQ_COOLING_START 0
-#define CPUFREQ_COOLING_STOP 1
-
-#if defined(CONFIG_CPU_THERMAL) || defined(CONFIG_CPU_THERMAL_MODULE)
+#ifdef CONFIG_CPU_THERMAL
/**
* cpufreq_cooling_register - function to create cpufreq cooling device.
* @clip_cpus: cpumask of cpus where the frequency constraints will happen
*/
-struct thermal_cooling_device *cpufreq_cooling_register(
- const struct cpumask *clip_cpus);
+struct thermal_cooling_device *
+cpufreq_cooling_register(const struct cpumask *clip_cpus);
/**
* cpufreq_cooling_unregister - function to remove cpufreq cooling device.
* @cdev: thermal cooling device pointer.
*/
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev);
+
+unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq);
#else /* !CONFIG_CPU_THERMAL */
-static inline struct thermal_cooling_device *cpufreq_cooling_register(
- const struct cpumask *clip_cpus)
+static inline struct thermal_cooling_device *
+cpufreq_cooling_register(const struct cpumask *clip_cpus)
{
return NULL;
}
-static inline void cpufreq_cooling_unregister(
- struct thermal_cooling_device *cdev)
+static inline
+void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
return;
}
+static inline
+unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
+{
+ return THERMAL_CSTATE_INVALID;
+}
#endif /* CONFIG_CPU_THERMAL */
#endif /* __CPU_COOLING_H__ */
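A registration sketch against the reworked prototypes; note the stub above returns NULL while the real implementation may return an ERR_PTR, hence the combined check:

#include <linux/cpu_cooling.h>
#include <linux/err.h>

static struct thermal_cooling_device *xyz_cdev;

static int __init xyz_cooling_init(void)
{
	/* constrain CPU0's frequency domain */
	xyz_cdev = cpufreq_cooling_register(cpumask_of(0));
	if (IS_ERR_OR_NULL(xyz_cdev))
		return -ENODEV;
	return 0;
}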
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 91ac8da25020..96d3e4ab11a9 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -967,8 +967,9 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
#ifdef CONFIG_DMA_ENGINE
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
-struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
-struct dma_chan *dma_request_slave_channel(struct device *dev, char *name);
+struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
+ dma_filter_fn fn, void *fn_param);
+struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
void dma_release_channel(struct dma_chan *chan);
#else
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
@@ -978,13 +979,13 @@ static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descript
static inline void dma_issue_pending_all(void)
{
}
-static inline struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask,
+static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
dma_filter_fn fn, void *fn_param)
{
return NULL;
}
static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
- char *name)
+ const char *name)
{
return NULL;
}
@@ -1005,9 +1006,9 @@ struct dma_chan *net_dma_find_channel(void);
__dma_request_slave_channel_compat(&(mask), x, y, dev, name)
static inline struct dma_chan
-*__dma_request_slave_channel_compat(dma_cap_mask_t *mask, dma_filter_fn fn,
- void *fn_param, struct device *dev,
- char *name)
+*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
+ dma_filter_fn fn, void *fn_param,
+ struct device *dev, char *name)
{
struct dma_chan *chan;
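With the constified name parameter, callers can now pass string literals directly; a sketch:

static int xyz_request_rx(struct device *dev)
{
	struct dma_chan *chan = dma_request_slave_channel(dev, "rx");

	if (!chan)
		return -ENODEV;	/* hypothetical fallback policy */
	dma_release_channel(chan);
	return 0;
}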
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 0c5a18ec322c..1b4d4ee1168f 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -52,7 +52,7 @@
#endif
extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.4.2"
+#define REL_VERSION "8.4.3"
#define API_VERSION 1
#define PRO_VERSION_MIN 86
#define PRO_VERSION_MAX 101
@@ -319,7 +319,8 @@ enum drbd_state_rv {
SS_IN_TRANSIENT_STATE = -18, /* Retry after the next state change */
SS_CONCURRENT_ST_CHG = -19, /* Concurrent cluster side state change! */
SS_O_VOL_PEER_PRI = -20,
- SS_AFTER_LAST_ERROR = -21, /* Keep this at bottom */
+ SS_OUTDATE_WO_CONN = -21,
+ SS_AFTER_LAST_ERROR = -22, /* Keep this at bottom */
};
/* from drbd_strings.c */
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h
index 1fa19c5f5e64..1fedf2b17cc8 100644
--- a/include/linux/drbd_limits.h
+++ b/include/linux/drbd_limits.h
@@ -126,13 +126,12 @@
#define DRBD_RESYNC_RATE_DEF 250
#define DRBD_RESYNC_RATE_SCALE 'k' /* kilobytes */
- /* less than 7 would hit performance unnecessarily.
- * 919 slots context information per transaction,
- * 32k activity log, 4k transaction size,
- * one transaction in flight:
- * 919 * 7 = 6433 */
+ /* less than 7 would hit performance unnecessarily. */
#define DRBD_AL_EXTENTS_MIN 7
-#define DRBD_AL_EXTENTS_MAX 6433
+ /* we use u16 as "slot number", (u16)~0 is "FREE".
+ * If you use >= 292 kB on-disk ring buffer,
+ * this is the maximum you can use: */
+#define DRBD_AL_EXTENTS_MAX 0xfffe
#define DRBD_AL_EXTENTS_DEF 1237
#define DRBD_AL_EXTENTS_SCALE '1'
diff --git a/include/linux/errno.h b/include/linux/errno.h
index f6bf082d4d4f..89627b9187f9 100644
--- a/include/linux/errno.h
+++ b/include/linux/errno.h
@@ -28,6 +28,5 @@
#define EBADTYPE 527 /* Type not supported by server */
#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */
#define EIOCBQUEUED 529 /* iocb queued, will get completion event */
-#define EIOCBRETRY 530 /* iocb queued, will trigger a retry */
#endif
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index f9a12f6243a5..df6fab82f87e 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -139,7 +139,7 @@ struct f2fs_extent {
__le32 len; /* length of the extent */
} __packed;
-#define F2FS_MAX_NAME_LEN 256
+#define F2FS_NAME_LEN 255
#define ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
@@ -165,7 +165,8 @@ struct f2fs_inode {
__le32 i_flags; /* file attributes */
__le32 i_pino; /* parent inode number */
__le32 i_namelen; /* file name length */
- __u8 i_name[F2FS_MAX_NAME_LEN]; /* file name for SPOR */
+ __u8 i_name[F2FS_NAME_LEN]; /* file name for SPOR */
+ __u8 i_reserved2; /* for backward compatibility */
struct f2fs_extent i_ext; /* caching a largest extent */
@@ -362,10 +363,10 @@ struct f2fs_summary_block {
typedef __le32 f2fs_hash_t;
/* One directory entry slot covers 8bytes-long file name */
-#define F2FS_NAME_LEN 8
-#define F2FS_NAME_LEN_BITS 3
+#define F2FS_SLOT_LEN 8
+#define F2FS_SLOT_LEN_BITS 3
-#define GET_DENTRY_SLOTS(x) ((x + F2FS_NAME_LEN - 1) >> F2FS_NAME_LEN_BITS)
+#define GET_DENTRY_SLOTS(x) ((x + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS)
/* the number of dentry in a block */
#define NR_DENTRY_IN_BLOCK 214
@@ -377,10 +378,10 @@ typedef __le32 f2fs_hash_t;
#define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
BITS_PER_BYTE)
#define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \
- F2FS_NAME_LEN) * \
+ F2FS_SLOT_LEN) * \
NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP))
-/* One directory entry slot representing F2FS_NAME_LEN-sized file name */
+/* One directory entry slot representing F2FS_SLOT_LEN-sized file name */
struct f2fs_dir_entry {
__le32 hash_code; /* hash code of file name */
__le32 ino; /* inode number */
@@ -394,7 +395,7 @@ struct f2fs_dentry_block {
__u8 dentry_bitmap[SIZE_OF_DENTRY_BITMAP];
__u8 reserved[SIZE_OF_RESERVED];
struct f2fs_dir_entry dentry[NR_DENTRY_IN_BLOCK];
- __u8 filename[NR_DENTRY_IN_BLOCK][F2FS_NAME_LEN];
+ __u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN];
} __packed;
/* file types used in inode_info->flags */
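A worked example of the renamed slot macros: one slot covers F2FS_SLOT_LEN (8) bytes of name, so a maximal F2FS_NAME_LEN (255) byte name occupies (255 + 8 - 1) >> 3 = 32 slots:

unsigned slots = GET_DENTRY_SLOTS(F2FS_NAME_LEN);	/* == 32 */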
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b5a24ba83b6f..43db02e9c9fa 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2091,7 +2091,7 @@ extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
void *holder);
extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
void *holder);
-extern int blkdev_put(struct block_device *bdev, fmode_t mode);
+extern void blkdev_put(struct block_device *bdev, fmode_t mode);
#ifdef CONFIG_SYSFS
extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
extern void bd_unlink_disk_holder(struct block_device *bdev,
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index f6c7ae3e223b..552e3f46e4a3 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -39,7 +39,7 @@ struct gpio {
const char *label;
};
-#ifdef CONFIG_GENERIC_GPIO
+#ifdef CONFIG_GPIOLIB
#ifdef CONFIG_ARCH_HAVE_CUSTOM_GPIO_H
#include <asm/gpio.h>
@@ -74,7 +74,7 @@ static inline int irq_to_gpio(unsigned int irq)
#endif /* ! CONFIG_ARCH_HAVE_CUSTOM_GPIO_H */
-#else /* ! CONFIG_GENERIC_GPIO */
+#else /* ! CONFIG_GPIOLIB */
#include <linux/kernel.h>
#include <linux/types.h>
@@ -226,7 +226,7 @@ gpiochip_remove_pin_ranges(struct gpio_chip *chip)
WARN_ON(1);
}
-#endif /* ! CONFIG_GENERIC_GPIO */
+#endif /* ! CONFIG_GPIOLIB */
struct device;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 3a62df310f2e..6b4890fa57e7 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -189,8 +189,7 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
-struct file *hugetlb_file_setup(const char *name, unsigned long addr,
- size_t size, vm_flags_t acct,
+struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
struct user_struct **user, int creat_flags,
int page_size_log);
@@ -209,8 +208,8 @@ static inline int is_file_hugepages(struct file *file)
#define is_file_hugepages(file) 0
static inline struct file *
-hugetlb_file_setup(const char *name, unsigned long addr, size_t size,
- vm_flags_t acctflag, struct user_struct **user, int creat_flags,
+hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
+ struct user_struct **user, int creat_flags,
int page_size_log)
{
return ERR_PTR(-ENOSYS);
@@ -288,6 +287,13 @@ static inline struct hstate *hstate_file(struct file *f)
return hstate_inode(file_inode(f));
}
+static inline struct hstate *hstate_sizelog(int page_size_log)
+{
+ if (!page_size_log)
+ return &default_hstate;
+ return size_to_hstate(1 << page_size_log);
+}
+
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
return hstate_file(vma->vm_file);
@@ -352,11 +358,12 @@ static inline int hstate_index(struct hstate *h)
return h - hstates;
}
-#else
+#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page_node(h, nid) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
+#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define huge_page_size(h) PAGE_SIZE
@@ -371,6 +378,6 @@ static inline unsigned int pages_per_huge_page(struct hstate *h)
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0
-#endif
+#endif /* CONFIG_HUGETLB_PAGE */
#endif /* _LINUX_HUGETLB_H */
diff --git a/include/linux/idr.h b/include/linux/idr.h
index a470ac3ef49d..871a213a8477 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -124,11 +124,13 @@ static inline void *idr_find(struct idr *idr, int id)
* @idp: idr handle
* @entry: the type * to use as cursor
* @id: id entry's key
+ *
+ * @entry and @id do not need to be initialized before the loop, and
+ * after normal termination @entry is left with the value NULL. This
+ * is convenient for a "not found" value.
*/
-#define idr_for_each_entry(idp, entry, id) \
- for (id = 0, entry = (typeof(entry))idr_get_next((idp), &(id)); \
- entry != NULL; \
- ++id, entry = (typeof(entry))idr_get_next((idp), &(id)))
+#define idr_for_each_entry(idp, entry, id) \
+ for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)
/*
* Don't use the following functions. These exist only to suppress
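A sketch of the "not found" convenience documented above; xyz_obj is a hypothetical element type:

struct xyz_obj { bool ready; };

static struct xyz_obj *xyz_find_first_ready(struct idr *idr)
{
	struct xyz_obj *obj;
	int id;

	idr_for_each_entry(idr, obj, id)
		if (obj->ready)
			break;
	return obj;	/* NULL when the loop terminated normally */
}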
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 4972e6e9ca93..e15828fd71f1 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -39,8 +39,11 @@ static inline void kref_init(struct kref *kref)
*/
static inline void kref_get(struct kref *kref)
{
- WARN_ON(!atomic_read(&kref->refcount));
- atomic_inc(&kref->refcount);
+ /* If refcount was 0 before incrementing then we have a race
+ * condition when this kref is being freed by some other thread right now.
+ * In this case one should use kref_get_unless_zero()
+ */
+ WARN_ON_ONCE(atomic_inc_return(&kref->refcount) < 2);
}
/**
@@ -100,7 +103,7 @@ static inline int kref_put_mutex(struct kref *kref,
struct mutex *lock)
{
WARN_ON(release == NULL);
- if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
+ if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
mutex_lock(lock);
if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
mutex_unlock(lock);
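A sketch of the lookup-side pattern the new warning points at: when the last reference may be dropping concurrently, take the reference with kref_get_unless_zero() and treat failure as "object already dying" (xyz_obj is hypothetical):

struct xyz_obj { struct kref ref; };

static struct xyz_obj *xyz_lookup_get(struct xyz_obj *obj)
{
	if (obj && !kref_get_unless_zero(&obj->ref))
		obj = NULL;	/* raced with the final kref_put() */
	return obj;
}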
diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h
index 4019013c6593..46262284de47 100644
--- a/include/linux/lru_cache.h
+++ b/include/linux/lru_cache.h
@@ -256,6 +256,7 @@ extern void lc_destroy(struct lru_cache *lc);
extern void lc_set(struct lru_cache *lc, unsigned int enr, int index);
extern void lc_del(struct lru_cache *lc, struct lc_element *element);
+extern struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr);
extern struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr);
extern struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr);
extern struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr);
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 53acaf64189f..a51b0134ce18 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -903,11 +903,12 @@ static inline int map_hw_to_sw_id(u16 header_id)
}
enum mlx4_net_trans_promisc_mode {
- MLX4_FS_PROMISC_NONE = 0,
- MLX4_FS_PROMISC_UPLINK,
- /* For future use. Not implemented yet */
- MLX4_FS_PROMISC_FUNCTION_PORT,
- MLX4_FS_PROMISC_ALL_MULTI,
+ MLX4_FS_REGULAR = 1,
+ MLX4_FS_ALL_DEFAULT,
+ MLX4_FS_MC_DEFAULT,
+ MLX4_FS_UC_SNIFFER,
+ MLX4_FS_MC_SNIFFER,
+ MLX4_FS_MODE_NUM, /* should be last */
};
struct mlx4_spec_eth {
@@ -936,7 +937,7 @@ struct mlx4_spec_ipv4 {
};
struct mlx4_spec_ib {
- __be32 r_qpn;
+ __be32 l3_qpn;
__be32 qpn_msk;
u8 dst_gid[16];
u8 dst_gid_msk[16];
@@ -969,6 +970,92 @@ struct mlx4_net_trans_rule {
u32 qpn;
};
+struct mlx4_net_trans_rule_hw_ctrl {
+ __be16 prio;
+ u8 type;
+ u8 flags;
+ u8 rsvd1;
+ u8 funcid;
+ u8 vep;
+ u8 port;
+ __be32 qpn;
+ __be32 rsvd2;
+};
+
+struct mlx4_net_trans_rule_hw_ib {
+ u8 size;
+ u8 rsvd1;
+ __be16 id;
+ u32 rsvd2;
+ __be32 l3_qpn;
+ __be32 qpn_mask;
+ u8 dst_gid[16];
+ u8 dst_gid_msk[16];
+} __packed;
+
+struct mlx4_net_trans_rule_hw_eth {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ u8 rsvd1[6];
+ u8 dst_mac[6];
+ u16 rsvd2;
+ u8 dst_mac_msk[6];
+ u16 rsvd3;
+ u8 src_mac[6];
+ u16 rsvd4;
+ u8 src_mac_msk[6];
+ u8 rsvd5;
+ u8 ether_type_enable;
+ __be16 ether_type;
+ __be16 vlan_tag_msk;
+ __be16 vlan_tag;
+} __packed;
+
+struct mlx4_net_trans_rule_hw_tcp_udp {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ __be16 rsvd1[3];
+ __be16 dst_port;
+ __be16 rsvd2;
+ __be16 dst_port_msk;
+ __be16 rsvd3;
+ __be16 src_port;
+ __be16 rsvd4;
+ __be16 src_port_msk;
+} __packed;
+
+struct mlx4_net_trans_rule_hw_ipv4 {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ __be32 rsvd1;
+ __be32 dst_ip;
+ __be32 dst_ip_msk;
+ __be32 src_ip;
+ __be32 src_ip_msk;
+} __packed;
+
+struct _rule_hw {
+ union {
+ struct {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ };
+ struct mlx4_net_trans_rule_hw_eth eth;
+ struct mlx4_net_trans_rule_hw_ib ib;
+ struct mlx4_net_trans_rule_hw_ipv4 ipv4;
+ struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
+ };
+};
+
+/* translating DMFS verbs sniffer rule to the FW API would need two reg IDs */
+struct mlx4_flow_handle {
+ u64 reg_id[2];
+};
+
int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
enum mlx4_net_trans_promisc_mode mode);
int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
@@ -1018,6 +1105,11 @@ void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
int mlx4_flow_attach(struct mlx4_dev *dev,
struct mlx4_net_trans_rule *rule, u64 *reg_id);
int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id);
+int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
+ enum mlx4_net_trans_promisc_mode flow_type);
+int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
+ enum mlx4_net_trans_rule_id id);
+int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
int i, int val);
diff --git a/include/linux/mlx4/srq.h b/include/linux/mlx4/srq.h
index 799a0697a383..192e0f7784f2 100644
--- a/include/linux/mlx4/srq.h
+++ b/include/linux/mlx4/srq.h
@@ -39,4 +39,6 @@ struct mlx4_wqe_srq_next_seg {
u32 reserved2[3];
};
+struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn);
+
#endif /* MLX4_SRQ_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1a7f19e7f1a0..e0c8528a41a4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -951,13 +951,19 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
* (see walk_page_range for more details)
*/
struct mm_walk {
- int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
- int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *);
- int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
- int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
- int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
- int (*hugetlb_entry)(pte_t *, unsigned long,
- unsigned long, unsigned long, struct mm_walk *);
+ int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk);
+ int (*pud_entry)(pud_t *pud, unsigned long addr,
+ unsigned long next, struct mm_walk *walk);
+ int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk);
+ int (*pte_entry)(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk);
+ int (*pte_hole)(unsigned long addr, unsigned long next,
+ struct mm_walk *walk);
+ int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
+ unsigned long addr, unsigned long next,
+ struct mm_walk *walk);
struct mm_struct *mm;
void *private;
};
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
index 4eb0a50d0c55..e93837f647de 100644
--- a/include/linux/mtd/blktrans.h
+++ b/include/linux/mtd/blktrans.h
@@ -74,7 +74,7 @@ struct mtd_blktrans_ops {
/* Called with mtd_table_mutex held; no race with add/remove */
int (*open)(struct mtd_blktrans_dev *dev);
- int (*release)(struct mtd_blktrans_dev *dev);
+ void (*release)(struct mtd_blktrans_dev *dev);
/* Called on {de,}registration and on subsequent addition/removal
of devices, with mtd_table_mutex held. */
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index f9ac2897b86b..a5cf4e8d6818 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -362,10 +362,10 @@ struct mtd_partition;
struct mtd_part_parser_data;
extern int mtd_device_parse_register(struct mtd_info *mtd,
- const char **part_probe_types,
- struct mtd_part_parser_data *parser_data,
- const struct mtd_partition *defparts,
- int defnr_parts);
+ const char * const *part_probe_types,
+ struct mtd_part_parser_data *parser_data,
+ const struct mtd_partition *defparts,
+ int defnr_parts);
#define mtd_device_register(master, parts, nr_parts) \
mtd_device_parse_register(master, NULL, NULL, parts, nr_parts)
extern int mtd_device_unregister(struct mtd_info *master);
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index ef52d9c91459..ab6363443ce8 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -86,7 +86,6 @@ extern int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
#define NAND_CMD_READOOB 0x50
#define NAND_CMD_ERASE1 0x60
#define NAND_CMD_STATUS 0x70
-#define NAND_CMD_STATUS_MULTI 0x71
#define NAND_CMD_SEQIN 0x80
#define NAND_CMD_RNDIN 0x85
#define NAND_CMD_READID 0x90
@@ -105,25 +104,6 @@ extern int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
#define NAND_CMD_RNDOUTSTART 0xE0
#define NAND_CMD_CACHEDPROG 0x15
-/* Extended commands for AG-AND device */
-/*
- * Note: the command for NAND_CMD_DEPLETE1 is really 0x00 but
- * there is no way to distinguish that from NAND_CMD_READ0
- * until the remaining sequence of commands has been completed
- * so add a high order bit and mask it off in the command.
- */
-#define NAND_CMD_DEPLETE1 0x100
-#define NAND_CMD_DEPLETE2 0x38
-#define NAND_CMD_STATUS_MULTI 0x71
-#define NAND_CMD_STATUS_ERROR 0x72
-/* multi-bank error status (banks 0-3) */
-#define NAND_CMD_STATUS_ERROR0 0x73
-#define NAND_CMD_STATUS_ERROR1 0x74
-#define NAND_CMD_STATUS_ERROR2 0x75
-#define NAND_CMD_STATUS_ERROR3 0x76
-#define NAND_CMD_STATUS_RESET 0x7f
-#define NAND_CMD_STATUS_CLEAR 0xff
-
#define NAND_CMD_NONE -1
/* Status bits */
@@ -165,28 +145,8 @@ typedef enum {
*/
/* Buswidth is 16 bit */
#define NAND_BUSWIDTH_16 0x00000002
-/* Device supports partial programming without padding */
-#define NAND_NO_PADDING 0x00000004
/* Chip has cache program function */
#define NAND_CACHEPRG 0x00000008
-/* Chip has copy back function */
-#define NAND_COPYBACK 0x00000010
-/*
- * AND Chip which has 4 banks and a confusing page / block
- * assignment. See Renesas datasheet for further information.
- */
-#define NAND_IS_AND 0x00000020
-/*
- * Chip has a array of 4 pages which can be read without
- * additional ready /busy waits.
- */
-#define NAND_4PAGE_ARRAY 0x00000040
-/*
- * Chip requires that BBT is periodically rewritten to prevent
- * bits from adjacent blocks from 'leaking' in altering data.
- * This happens with the Renesas AG-AND chips, possibly others.
- */
-#define BBT_AUTO_REFRESH 0x00000080
/*
* Chip requires ready check on read (for auto-incremented sequential read).
* True only for small page devices; large page devices do not support
@@ -207,13 +167,10 @@ typedef enum {
#define NAND_SUBPAGE_READ 0x00001000
/* Options valid for Samsung large page devices */
-#define NAND_SAMSUNG_LP_OPTIONS \
- (NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK)
+#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
/* Macros to identify the above */
-#define NAND_MUST_PAD(chip) (!(chip->options & NAND_NO_PADDING))
#define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG))
-#define NAND_HAS_COPYBACK(chip) ((chip->options & NAND_COPYBACK))
#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
/* Non chip related options */
@@ -361,6 +318,7 @@ struct nand_hw_control {
* any single ECC step, 0 if bitflips uncorrectable, -EIO hw error
* @read_subpage: function to read parts of the page covered by ECC;
* returns same as read_page()
+ * @write_subpage: function to write parts of the page covered by ECC.
* @write_page: function to write a page according to the ECC generator
* requirements.
* @write_oob_raw: function to write chip OOB data without ECC
@@ -392,6 +350,9 @@ struct nand_ecc_ctrl {
uint8_t *buf, int oob_required, int page);
int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
uint32_t offs, uint32_t len, uint8_t *buf);
+ int (*write_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t offset, uint32_t data_len,
+ const uint8_t *data_buf, int oob_required);
int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required);
int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
@@ -527,8 +488,8 @@ struct nand_chip {
int (*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state,
int status, int page);
int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page,
- int cached, int raw);
+ uint32_t offset, int data_len, const uint8_t *buf,
+ int oob_required, int page, int cached, int raw);
int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip,
int feature_addr, uint8_t *subfeature_para);
int (*onfi_get_features)(struct mtd_info *mtd, struct nand_chip *chip,
@@ -589,25 +550,65 @@ struct nand_chip {
#define NAND_MFR_MACRONIX 0xc2
#define NAND_MFR_EON 0x92
+/* The maximum expected count of bytes in the NAND ID sequence */
+#define NAND_MAX_ID_LEN 8
+
+/*
+ * A helper for defining older NAND chips where the second ID byte fully
+ * defined the chip, including the geometry (chip size, eraseblock size, page
+ * size). All these chips have a 512-byte NAND page size.
+ */
+#define LEGACY_ID_NAND(nm, devid, chipsz, erasesz, opts) \
+ { .name = (nm), {{ .dev_id = (devid) }}, .pagesize = 512, \
+ .chipsize = (chipsz), .erasesize = (erasesz), .options = (opts) }
+
+/*
+ * A helper for defining newer chips which report their page size and
+ * eraseblock size via the extended ID bytes.
+ *
+ * The real difference between LEGACY_ID_NAND and EXTENDED_ID_NAND is that with
+ * EXTENDED_ID_NAND, manufacturers overloaded the same device ID so that the
+ * device ID now only represented a particular total chip size (and voltage,
+ * buswidth), and the page size, eraseblock size, and OOB size could vary while
+ * using the same device ID.
+ */
+#define EXTENDED_ID_NAND(nm, devid, chipsz, opts) \
+ { .name = (nm), {{ .dev_id = (devid) }}, .chipsize = (chipsz), \
+ .options = (opts) }
+
/**
* struct nand_flash_dev - NAND Flash Device ID Structure
- * @name: Identify the device type
- * @id: device ID code
- * @pagesize: Pagesize in bytes. Either 256 or 512 or 0
- * If the pagesize is 0, then the real pagesize
- * and the eraseize are determined from the
- * extended id bytes in the chip
- * @erasesize: Size of an erase block in the flash device.
- * @chipsize: Total chipsize in Mega Bytes
- * @options: Bitfield to store chip relevant options
+ * @name: a human-readable name of the NAND chip
+ * @mfr_id: manufacturer ID part of the full chip ID array (refers to the
+ * same memory address as @id[0])
+ * @dev_id: device ID part of the full chip ID array (refers to the same
+ * memory address as @id[1])
+ * @id: full device ID array
+ * @pagesize: size of the NAND page in bytes; if 0, then the real page size (as
+ * well as the eraseblock size) is determined from the extended NAND
+ * chip ID array
+ * @chipsize: total chip size in MiB
+ * @erasesize: eraseblock size in bytes (determined from the extended ID if 0)
+ * @options: stores various chip bit options
+ * @id_len: The valid length of the @id.
+ * @oobsize: OOB size
*/
struct nand_flash_dev {
char *name;
- int id;
- unsigned long pagesize;
- unsigned long chipsize;
- unsigned long erasesize;
- unsigned long options;
+ union {
+ struct {
+ uint8_t mfr_id;
+ uint8_t dev_id;
+ };
+ uint8_t id[NAND_MAX_ID_LEN];
+ };
+ unsigned int pagesize;
+ unsigned int chipsize;
+ unsigned int erasesize;
+ unsigned int options;
+ uint16_t id_len;
+ uint16_t oobsize;
};
/**
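The two helpers above in use; the device IDs and sizes here are illustrative, not taken from a real ID table:

static struct nand_flash_dev xyz_nand_ids[] = {
	/* legacy: geometry fully implied by the device ID */
	LEGACY_ID_NAND("NAND 64MiB 3,3V 8-bit", 0x76, 64, 0x4000, 0),
	/* extended: page/eraseblock size come from the extended ID bytes */
	EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xf1, 128, 0),
	{NULL},
};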
diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h
index d2887e76b7f6..aa6a2633c2da 100644
--- a/include/linux/mtd/physmap.h
+++ b/include/linux/mtd/physmap.h
@@ -30,7 +30,7 @@ struct physmap_flash_data {
unsigned int pfow_base;
char *probe_type;
struct mtd_partition *parts;
- const char **part_probe_types;
+ const char * const *part_probe_types;
};
#endif /* __LINUX_MTD_PHYSMAP__ */
diff --git a/include/linux/mtd/plat-ram.h b/include/linux/mtd/plat-ram.h
index e07890aff1cf..44212d65aa97 100644
--- a/include/linux/mtd/plat-ram.h
+++ b/include/linux/mtd/plat-ram.h
@@ -20,8 +20,8 @@
struct platdata_mtd_ram {
const char *mapname;
- const char **map_probes;
- const char **probes;
+ const char * const *map_probes;
+ const char * const *probes;
struct mtd_partition *partitions;
int nr_partitions;
int bankwidth;
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h
index d15073e080dd..364dda734877 100644
--- a/include/linux/of_dma.h
+++ b/include/linux/of_dma.h
@@ -25,7 +25,6 @@ struct of_dma {
struct dma_chan *(*of_dma_xlate)
(struct of_phandle_args *, struct of_dma *);
void *of_dma_data;
- int use_count;
};
struct of_dma_filter_info {
@@ -38,9 +37,9 @@ extern int of_dma_controller_register(struct device_node *np,
struct dma_chan *(*of_dma_xlate)
(struct of_phandle_args *, struct of_dma *),
void *data);
-extern int of_dma_controller_free(struct device_node *np);
+extern void of_dma_controller_free(struct device_node *np);
extern struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
- char *name);
+ const char *name);
extern struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma);
#else
@@ -52,13 +51,12 @@ static inline int of_dma_controller_register(struct device_node *np,
return -ENODEV;
}
-static inline int of_dma_controller_free(struct device_node *np)
+static inline void of_dma_controller_free(struct device_node *np)
{
- return -ENODEV;
}
static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
- char *name)
+ const char *name)
{
return NULL;
}
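The DT-side counterpart mirrors the earlier ACPI sketch; this assumes the filter_fn member of of_dma_filter_info (declared in the full header), with hypothetical xyz_* names:

static bool xyz_of_filter(struct dma_chan *chan, void *param)
{
	return true;	/* hypothetical */
}

static int xyz_of_probe(struct platform_device *pdev)
{
	static struct of_dma_filter_info info = {
		.filter_fn = xyz_of_filter,
	};

	return of_dma_controller_register(pdev->dev.of_node,
					  of_dma_simple_xlate, &info);
}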
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 731e4ecee3bd..e2772666f004 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -4,6 +4,7 @@
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
+#include <linux/workqueue.h>
#include <linux/threads.h>
#include <linux/nsproxy.h>
#include <linux/kref.h>
diff --git a/include/linux/platform_data/elm.h b/include/linux/platform_data/elm.h
index 1bd5244d1dcd..bf0a83b7ed9d 100644
--- a/include/linux/platform_data/elm.h
+++ b/include/linux/platform_data/elm.h
@@ -50,5 +50,5 @@ struct elm_errorvec {
void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
struct elm_errorvec *err_vec);
-void elm_config(struct device *dev, enum bch_ecc bch_type);
+int elm_config(struct device *dev, enum bch_ecc bch_type);
#endif /* __ELM_H */
diff --git a/include/linux/random.h b/include/linux/random.h
index 347ce553a306..3b9377d6b7a5 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -29,13 +29,6 @@ u32 prandom_u32(void);
void prandom_bytes(void *buf, int nbytes);
void prandom_seed(u32 seed);
-/*
- * These macros are preserved for backward compatibility and should be
- * removed as soon as a transition is finished.
- */
-#define random32() prandom_u32()
-#define srandom32(seed) prandom_seed(seed)
-
u32 prandom_u32_state(struct rnd_state *);
void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 8da67d625e13..0616ffe45702 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -133,10 +133,20 @@ do { \
_down_write_nest_lock(sem, &(nest_lock)->dep_map); \
} while (0);
+/*
+ * Take/release a lock when it is not the owner that will release it.
+ *
+ * [ This API should be avoided as much as possible - the
+ * proper abstraction for this case is completions. ]
+ */
+extern void down_read_non_owner(struct rw_semaphore *sem);
+extern void up_read_non_owner(struct rw_semaphore *sem);
#else
# define down_read_nested(sem, subclass) down_read(sem)
# define down_write_nest_lock(sem, nest_lock) down_write(sem)
# define down_write_nested(sem, subclass) down_write(sem)
+# define down_read_non_owner(sem) down_read(sem)
+# define up_read_non_owner(sem) up_read(sem)
#endif
#endif /* _LINUX_RWSEM_H */
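
A hedged sketch of the pattern the new non-owner primitives cover: the semaphore is taken in one context and released from another (for example an I/O completion path), which lockdep would otherwise flag. Function names are invented, and, as the comment above says, completions remain the preferred abstraction:

	#include <linux/rwsem.h>

	static void start_io(struct rw_semaphore *sem)
	{
		down_read_non_owner(sem);	/* completion path will release */
	}

	static void io_done(struct rw_semaphore *sem)
	{
		up_read_non_owner(sem);		/* released by a different task */
	}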
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4800e9d1864c..caa8f4d0186b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -313,8 +313,6 @@ extern void schedule_preempt_disabled(void);
struct nsproxy;
struct user_namespace;
-#include <linux/aio.h>
-
#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
@@ -1413,6 +1411,10 @@ struct task_struct {
#ifdef CONFIG_UPROBES
struct uprobe_task *utask;
#endif
+#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
+ unsigned int sequential_io;
+ unsigned int sequential_io_avg;
+#endif
};
/* Future-safe accessor for struct task_struct's cpus_allowed. */
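
The two new task_struct fields carry per-task sequential-I/O statistics for bcache. A hypothetical sketch of the kind of bookkeeping a consumer might do (the averaging scheme is illustrative, not bcache's actual one):

	#include <linux/sched.h>

	#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	/* Fold a just-finished run length into a running average, as an
	 * input to a cache-bypass heuristic for sequential workloads. */
	static void task_note_sequential_io(struct task_struct *t,
					    unsigned int bytes)
	{
		t->sequential_io = bytes;
		t->sequential_io_avg = (t->sequential_io_avg + bytes) / 2;
	}
	#endif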
diff --git a/include/linux/sudmac.h b/include/linux/sudmac.h
new file mode 100644
index 000000000000..377b8a5788fa
--- /dev/null
+++ b/include/linux/sudmac.h
@@ -0,0 +1,52 @@
+/*
+ * Header for the SUDMAC driver
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+#ifndef SUDMAC_H
+#define SUDMAC_H
+
+#include <linux/dmaengine.h>
+#include <linux/shdma-base.h>
+#include <linux/types.h>
+
+/* Used by slave DMA clients to request DMA to/from a specific peripheral */
+struct sudmac_slave {
+ struct shdma_slave shdma_slave; /* Set by the platform */
+};
+
+/*
+ * Supplied by platforms to specify how a DMA channel has to be configured
+ * for a certain peripheral
+ */
+struct sudmac_slave_config {
+ int slave_id;
+};
+
+struct sudmac_channel {
+ unsigned long offset;
+ unsigned long config;
+	unsigned long wait;		/* The configurable range is 0 to 3 */
+ unsigned long dint_end_bit;
+};
+
+struct sudmac_pdata {
+ const struct sudmac_slave_config *slave;
+ int slave_num;
+ const struct sudmac_channel *channel;
+ int channel_num;
+};
+
+/* Definitions for the sudmac_channel.config */
+#define SUDMAC_TX_BUFFER_MODE BIT(0)
+#define SUDMAC_RX_END_MODE BIT(1)
+
+/* Definitions for the sudmac_channel.dint_end_bit */
+#define SUDMAC_DMA_BIT_CH0 BIT(0)
+#define SUDMAC_DMA_BIT_CH1 BIT(1)
+
+#endif
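
A hypothetical board-file sketch wiring up one SUDMAC channel with the structures and bit definitions above (offsets and IDs are invented):

	#include <linux/kernel.h>
	#include <linux/sudmac.h>

	static const struct sudmac_slave_config my_slaves[] = {
		{ .slave_id = 0 },
	};

	static const struct sudmac_channel my_channels[] = {
		{
			.offset       = 0x0000,
			.config       = SUDMAC_TX_BUFFER_MODE,
			.wait         = 3,	/* top of the 0..3 range */
			.dint_end_bit = SUDMAC_DMA_BIT_CH0,
		},
	};

	static struct sudmac_pdata my_sudmac_pdata = {
		.slave       = my_slaves,
		.slave_num   = ARRAY_SIZE(my_slaves),
		.channel     = my_channels,
		.channel_num = ARRAY_SIZE(my_channels),
	};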
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index e3c0ae9bb1fa..a386a1cbb6e1 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -33,8 +33,11 @@
#define THERMAL_MAX_TRIPS 12
#define THERMAL_NAME_LENGTH 20
+/* invalid cooling state */
+#define THERMAL_CSTATE_INVALID -1UL
+
/* No upper/lower limit requirement */
-#define THERMAL_NO_LIMIT -1UL
+#define THERMAL_NO_LIMIT THERMAL_CSTATE_INVALID
/* Unit conversion macros */
#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \
@@ -184,7 +187,6 @@ struct thermal_governor {
char name[THERMAL_NAME_LENGTH];
int (*throttle)(struct thermal_zone_device *tz, int trip);
struct list_head governor_list;
- struct module *owner;
};
/* Structure that holds binding parameters for a zone */
@@ -237,21 +239,20 @@ void thermal_zone_device_update(struct thermal_zone_device *);
struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
const struct thermal_cooling_device_ops *);
void thermal_cooling_device_unregister(struct thermal_cooling_device *);
+struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name);
+int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp);
int get_tz_trend(struct thermal_zone_device *, int);
struct thermal_instance *get_thermal_instance(struct thermal_zone_device *,
struct thermal_cooling_device *, int);
void thermal_cdev_update(struct thermal_cooling_device *);
-void notify_thermal_framework(struct thermal_zone_device *, int);
-
-int thermal_register_governor(struct thermal_governor *);
-void thermal_unregister_governor(struct thermal_governor *);
+void thermal_notify_framework(struct thermal_zone_device *, int);
#ifdef CONFIG_NET
extern int thermal_generate_netlink_event(struct thermal_zone_device *tz,
enum events event);
#else
-static int thermal_generate_netlink_event(struct thermal_zone_device *tz,
+static inline int thermal_generate_netlink_event(struct thermal_zone_device *tz,
enum events event)
{
return 0;
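
A short sketch of the two getters added above; the zone name is hypothetical and the temperature is reported in millicelsius:

	#include <linux/err.h>
	#include <linux/thermal.h>

	static int read_zone_temp(unsigned long *temp)
	{
		struct thermal_zone_device *tz;

		tz = thermal_zone_get_zone_by_name("cpu-thermal");
		if (IS_ERR(tz))
			return PTR_ERR(tz);

		return thermal_zone_get_temp(tz, temp);
	}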
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 7cb64d4b499d..ac38be2692d8 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -330,6 +330,92 @@ do { \
__ret; \
})
+#define __wait_event_hrtimeout(wq, condition, timeout, state) \
+({ \
+ int __ret = 0; \
+ DEFINE_WAIT(__wait); \
+ struct hrtimer_sleeper __t; \
+ \
+ hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
+ HRTIMER_MODE_REL); \
+ hrtimer_init_sleeper(&__t, current); \
+ if ((timeout).tv64 != KTIME_MAX) \
+ hrtimer_start_range_ns(&__t.timer, timeout, \
+ current->timer_slack_ns, \
+ HRTIMER_MODE_REL); \
+ \
+ for (;;) { \
+ prepare_to_wait(&wq, &__wait, state); \
+ if (condition) \
+ break; \
+ if (state == TASK_INTERRUPTIBLE && \
+ signal_pending(current)) { \
+ __ret = -ERESTARTSYS; \
+ break; \
+ } \
+ if (!__t.task) { \
+ __ret = -ETIME; \
+ break; \
+ } \
+ schedule(); \
+ } \
+ \
+ hrtimer_cancel(&__t.timer); \
+ destroy_hrtimer_on_stack(&__t.timer); \
+ finish_wait(&wq, &__wait); \
+ __ret; \
+})
+
+/**
+ * wait_event_hrtimeout - sleep until a condition becomes true or a timeout elapses
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout, as a ktime_t
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true or the @timeout elapses.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function returns 0 if @condition became true, or -ETIME if the timeout
+ * elapsed.
+ */
+#define wait_event_hrtimeout(wq, condition, timeout) \
+({ \
+ int __ret = 0; \
+ if (!(condition)) \
+ __ret = __wait_event_hrtimeout(wq, condition, timeout, \
+ TASK_UNINTERRUPTIBLE); \
+ __ret; \
+})
+
+/**
+ * wait_event_interruptible_hrtimeout - sleep until a condition becomes true or a timeout elapses
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout, as a ktime_t
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the @condition
+ * evaluates to true, a signal is received, or the @timeout elapses.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function returns 0 if @condition became true, -ERESTARTSYS if it was
+ * interrupted by a signal, or -ETIME if the timeout elapsed.
+ */
+#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
+({ \
+ long __ret = 0; \
+ if (!(condition)) \
+ __ret = __wait_event_hrtimeout(wq, condition, timeout, \
+ TASK_INTERRUPTIBLE); \
+ __ret; \
+})
+
#define __wait_event_interruptible_exclusive(wq, condition, ret) \
do { \
DEFINE_WAIT(__wait); \
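
A usage sketch of the interruptible variant defined above, waiting up to 500 microseconds with hrtimer rather than jiffy resolution (the waitqueue and flag are hypothetical):

	#include <linux/ktime.h>
	#include <linux/wait.h>

	static int wait_for_flag(wait_queue_head_t *wq, bool *flag)
	{
		/* 0 on success, -ETIME on timeout, -ERESTARTSYS on a signal */
		return wait_event_interruptible_hrtimeout(*wq, *flag,
					ktime_set(0, 500 * NSEC_PER_USEC));
	}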
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 9a9367c0c076..579a5007c696 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -5,6 +5,7 @@
#define WRITEBACK_H
#include <linux/sched.h>
+#include <linux/workqueue.h>
#include <linux/fs.h>
DECLARE_PER_CPU(int, dirty_throttle_leaks);
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
new file mode 100644
index 000000000000..3cc5a0b278c3
--- /dev/null
+++ b/include/trace/events/bcache.h
@@ -0,0 +1,271 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM bcache
+
+#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_BCACHE_H
+
+#include <linux/tracepoint.h>
+
+struct search;
+
+DECLARE_EVENT_CLASS(bcache_request,
+
+ TP_PROTO(struct search *s, struct bio *bio),
+
+ TP_ARGS(s, bio),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(unsigned int, orig_major )
+ __field(unsigned int, orig_minor )
+ __field(sector_t, sector )
+		__field(sector_t,	orig_sector		)
+ __field(unsigned int, nr_sector )
+ __array(char, rwbs, 6 )
+ __array(char, comm, TASK_COMM_LEN )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio->bi_bdev->bd_dev;
+ __entry->orig_major = s->d->disk->major;
+ __entry->orig_minor = s->d->disk->first_minor;
+ __entry->sector = bio->bi_sector;
+ __entry->orig_sector = bio->bi_sector - 16;
+ __entry->nr_sector = bio->bi_size >> 9;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("%d,%d %s %llu + %u [%s] (from %d,%d @ %llu)",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->comm,
+ __entry->orig_major, __entry->orig_minor,
+ (unsigned long long)__entry->orig_sector)
+);
+
+DEFINE_EVENT(bcache_request, bcache_request_start,
+
+ TP_PROTO(struct search *s, struct bio *bio),
+
+ TP_ARGS(s, bio)
+);
+
+DEFINE_EVENT(bcache_request, bcache_request_end,
+
+ TP_PROTO(struct search *s, struct bio *bio),
+
+ TP_ARGS(s, bio)
+);
+
+DECLARE_EVENT_CLASS(bcache_bio,
+
+ TP_PROTO(struct bio *bio),
+
+ TP_ARGS(bio),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(sector_t, sector )
+ __field(unsigned int, nr_sector )
+ __array(char, rwbs, 6 )
+ __array(char, comm, TASK_COMM_LEN )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio->bi_bdev->bd_dev;
+ __entry->sector = bio->bi_sector;
+ __entry->nr_sector = bio->bi_size >> 9;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("%d,%d %s %llu + %u [%s]",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->comm)
+);
+
+
+DEFINE_EVENT(bcache_bio, bcache_passthrough,
+
+ TP_PROTO(struct bio *bio),
+
+ TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_cache_hit,
+
+ TP_PROTO(struct bio *bio),
+
+ TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_cache_miss,
+
+ TP_PROTO(struct bio *bio),
+
+ TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_read_retry,
+
+ TP_PROTO(struct bio *bio),
+
+ TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_writethrough,
+
+ TP_PROTO(struct bio *bio),
+
+ TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_writeback,
+
+ TP_PROTO(struct bio *bio),
+
+ TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_write_skip,
+
+ TP_PROTO(struct bio *bio),
+
+ TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_btree_read,
+
+ TP_PROTO(struct bio *bio),
+
+ TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_btree_write,
+
+ TP_PROTO(struct bio *bio),
+
+ TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_write_dirty,
+
+ TP_PROTO(struct bio *bio),
+
+ TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_read_dirty,
+
+ TP_PROTO(struct bio *bio),
+
+ TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_write_moving,
+
+ TP_PROTO(struct bio *bio),
+
+ TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_read_moving,
+
+ TP_PROTO(struct bio *bio),
+
+ TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bcache_bio, bcache_journal_write,
+
+ TP_PROTO(struct bio *bio),
+
+ TP_ARGS(bio)
+);
+
+DECLARE_EVENT_CLASS(bcache_cache_bio,
+
+ TP_PROTO(struct bio *bio,
+ sector_t orig_sector,
+ struct block_device* orig_bdev),
+
+ TP_ARGS(bio, orig_sector, orig_bdev),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(dev_t, orig_dev )
+ __field(sector_t, sector )
+ __field(sector_t, orig_sector )
+ __field(unsigned int, nr_sector )
+ __array(char, rwbs, 6 )
+ __array(char, comm, TASK_COMM_LEN )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio->bi_bdev->bd_dev;
+ __entry->orig_dev = orig_bdev->bd_dev;
+ __entry->sector = bio->bi_sector;
+ __entry->orig_sector = orig_sector;
+ __entry->nr_sector = bio->bi_size >> 9;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("%d,%d %s %llu + %u [%s] (from %d,%d %llu)",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->comm,
+ MAJOR(__entry->orig_dev), MINOR(__entry->orig_dev),
+ (unsigned long long)__entry->orig_sector)
+);
+
+DEFINE_EVENT(bcache_cache_bio, bcache_cache_insert,
+
+ TP_PROTO(struct bio *bio,
+ sector_t orig_sector,
+ struct block_device *orig_bdev),
+
+ TP_ARGS(bio, orig_sector, orig_bdev)
+);
+
+DECLARE_EVENT_CLASS(bcache_gc,
+
+ TP_PROTO(uint8_t *uuid),
+
+ TP_ARGS(uuid),
+
+ TP_STRUCT__entry(
+ __field(uint8_t *, uuid)
+ ),
+
+ TP_fast_assign(
+ __entry->uuid = uuid;
+ ),
+
+ TP_printk("%pU", __entry->uuid)
+);
+
+
+DEFINE_EVENT(bcache_gc, bcache_gc_start,
+
+ TP_PROTO(uint8_t *uuid),
+
+ TP_ARGS(uuid)
+);
+
+DEFINE_EVENT(bcache_gc, bcache_gc_end,
+
+ TP_PROTO(uint8_t *uuid),
+
+ TP_ARGS(uuid)
+);
+
+#endif /* _TRACE_BCACHE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
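
Each DEFINE_EVENT above generates a trace_<name>() wrapper. A hedged sketch of the driver side (exactly one .c file defines the points via CREATE_TRACE_POINTS; the request function is hypothetical):

	#define CREATE_TRACE_POINTS
	#include <trace/events/bcache.h>

	static void my_make_request(struct search *s, struct bio *bio)
	{
		trace_bcache_request_start(s, bio);
		/* ... service the request ... */
		trace_bcache_request_end(s, bio);
	}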
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 9c1467357b03..60ae7c3db912 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -244,7 +244,7 @@ TRACE_EVENT(block_bio_bounce,
__entry->dev = bio->bi_bdev ?
bio->bi_bdev->bd_dev : 0;
__entry->sector = bio->bi_sector;
- __entry->nr_sector = bio->bi_size >> 9;
+ __entry->nr_sector = bio_sectors(bio);
blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -281,7 +281,7 @@ TRACE_EVENT(block_bio_complete,
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_sector;
- __entry->nr_sector = bio->bi_size >> 9;
+ __entry->nr_sector = bio_sectors(bio);
__entry->error = error;
blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
),
@@ -309,7 +309,7 @@ DECLARE_EVENT_CLASS(block_bio_merge,
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_sector;
- __entry->nr_sector = bio->bi_size >> 9;
+ __entry->nr_sector = bio_sectors(bio);
blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -376,7 +376,7 @@ TRACE_EVENT(block_bio_queue,
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_sector;
- __entry->nr_sector = bio->bi_size >> 9;
+ __entry->nr_sector = bio_sectors(bio);
blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -404,7 +404,7 @@ DECLARE_EVENT_CLASS(block_get_rq,
TP_fast_assign(
__entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
__entry->sector = bio ? bio->bi_sector : 0;
- __entry->nr_sector = bio ? bio->bi_size >> 9 : 0;
+ __entry->nr_sector = bio ? bio_sectors(bio) : 0;
blk_fill_rwbs(__entry->rwbs,
bio ? bio->bi_rw : 0, __entry->nr_sector);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
@@ -580,7 +580,7 @@ TRACE_EVENT(block_bio_remap,
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_sector;
- __entry->nr_sector = bio->bi_size >> 9;
+ __entry->nr_sector = bio_sectors(bio);
__entry->old_dev = dev;
__entry->old_sector = from;
blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
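
For reference, bio_sectors() at this point in the tree expands to the same shift the open-coded expressions used, so the block.h hunks are a readability cleanup rather than a behavior change:

	/* include/linux/bio.h (same era): */
	#define bio_sectors(bio)	((bio)->bi_size >> 9)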
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
new file mode 100644
index 000000000000..52ae54828eda
--- /dev/null
+++ b/include/trace/events/f2fs.h
@@ -0,0 +1,682 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM f2fs
+
+#if !defined(_TRACE_F2FS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_F2FS_H
+
+#include <linux/tracepoint.h>
+
+#define show_dev(entry) MAJOR(entry->dev), MINOR(entry->dev)
+#define show_dev_ino(entry) show_dev(entry), (unsigned long)entry->ino
+
+#define show_block_type(type) \
+ __print_symbolic(type, \
+ { NODE, "NODE" }, \
+ { DATA, "DATA" }, \
+ { META, "META" }, \
+ { META_FLUSH, "META_FLUSH" })
+
+#define show_bio_type(type) \
+ __print_symbolic(type, \
+ { READ, "READ" }, \
+ { READA, "READAHEAD" }, \
+ { READ_SYNC, "READ_SYNC" }, \
+ { WRITE, "WRITE" }, \
+ { WRITE_SYNC, "WRITE_SYNC" }, \
+ { WRITE_FLUSH, "WRITE_FLUSH" }, \
+ { WRITE_FUA, "WRITE_FUA" })
+
+#define show_data_type(type) \
+ __print_symbolic(type, \
+ { CURSEG_HOT_DATA, "Hot DATA" }, \
+ { CURSEG_WARM_DATA, "Warm DATA" }, \
+ { CURSEG_COLD_DATA, "Cold DATA" }, \
+ { CURSEG_HOT_NODE, "Hot NODE" }, \
+ { CURSEG_WARM_NODE, "Warm NODE" }, \
+ { CURSEG_COLD_NODE, "Cold NODE" }, \
+ { NO_CHECK_TYPE, "No TYPE" })
+
+#define show_gc_type(type) \
+ __print_symbolic(type, \
+ { FG_GC, "Foreground GC" }, \
+ { BG_GC, "Background GC" })
+
+#define show_alloc_mode(type) \
+ __print_symbolic(type, \
+ { LFS, "LFS-mode" }, \
+ { SSR, "SSR-mode" })
+
+#define show_victim_policy(type) \
+ __print_symbolic(type, \
+ { GC_GREEDY, "Greedy" }, \
+ { GC_CB, "Cost-Benefit" })
+
+struct victim_sel_policy;
+
+DECLARE_EVENT_CLASS(f2fs__inode,
+
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(ino_t, pino)
+ __field(umode_t, mode)
+ __field(loff_t, size)
+ __field(unsigned int, nlink)
+ __field(blkcnt_t, blocks)
+ __field(__u8, advise)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pino = F2FS_I(inode)->i_pino;
+ __entry->mode = inode->i_mode;
+ __entry->nlink = inode->i_nlink;
+ __entry->size = inode->i_size;
+ __entry->blocks = inode->i_blocks;
+ __entry->advise = F2FS_I(inode)->i_advise;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, pino = %lu, i_mode = 0x%hx, "
+ "i_size = %lld, i_nlink = %u, i_blocks = %llu, i_advise = 0x%x",
+ show_dev_ino(__entry),
+ (unsigned long)__entry->pino,
+ __entry->mode,
+ __entry->size,
+ (unsigned int)__entry->nlink,
+ (unsigned long long)__entry->blocks,
+ (unsigned char)__entry->advise)
+);
+
+DECLARE_EVENT_CLASS(f2fs__inode_exit,
+
+ TP_PROTO(struct inode *inode, int ret),
+
+ TP_ARGS(inode, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, ret = %d",
+ show_dev_ino(__entry),
+ __entry->ret)
+);
+
+DEFINE_EVENT(f2fs__inode, f2fs_sync_file_enter,
+
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode)
+);
+
+TRACE_EVENT(f2fs_sync_file_exit,
+
+ TP_PROTO(struct inode *inode, bool need_cp, int datasync, int ret),
+
+ TP_ARGS(inode, need_cp, datasync, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(bool, need_cp)
+ __field(int, datasync)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->need_cp = need_cp;
+ __entry->datasync = datasync;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, checkpoint is %s, "
+ "datasync = %d, ret = %d",
+ show_dev_ino(__entry),
+ __entry->need_cp ? "needed" : "not needed",
+ __entry->datasync,
+ __entry->ret)
+);
+
+TRACE_EVENT(f2fs_sync_fs,
+
+ TP_PROTO(struct super_block *sb, int wait),
+
+ TP_ARGS(sb, wait),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, dirty)
+ __field(int, wait)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->dirty = F2FS_SB(sb)->s_dirty;
+ __entry->wait = wait;
+ ),
+
+ TP_printk("dev = (%d,%d), superblock is %s, wait = %d",
+ show_dev(__entry),
+ __entry->dirty ? "dirty" : "not dirty",
+ __entry->wait)
+);
+
+DEFINE_EVENT(f2fs__inode, f2fs_iget,
+
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode)
+);
+
+DEFINE_EVENT(f2fs__inode_exit, f2fs_iget_exit,
+
+ TP_PROTO(struct inode *inode, int ret),
+
+ TP_ARGS(inode, ret)
+);
+
+DEFINE_EVENT(f2fs__inode, f2fs_evict_inode,
+
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode)
+);
+
+DEFINE_EVENT(f2fs__inode_exit, f2fs_new_inode,
+
+ TP_PROTO(struct inode *inode, int ret),
+
+ TP_ARGS(inode, ret)
+);
+
+TRACE_EVENT(f2fs_unlink_enter,
+
+ TP_PROTO(struct inode *dir, struct dentry *dentry),
+
+ TP_ARGS(dir, dentry),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(loff_t, size)
+ __field(blkcnt_t, blocks)
+ __field(const char *, name)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dir->i_sb->s_dev;
+ __entry->ino = dir->i_ino;
+ __entry->size = dir->i_size;
+ __entry->blocks = dir->i_blocks;
+ __entry->name = dentry->d_name.name;
+ ),
+
+ TP_printk("dev = (%d,%d), dir ino = %lu, i_size = %lld, "
+ "i_blocks = %llu, name = %s",
+ show_dev_ino(__entry),
+ __entry->size,
+ (unsigned long long)__entry->blocks,
+ __entry->name)
+);
+
+DEFINE_EVENT(f2fs__inode_exit, f2fs_unlink_exit,
+
+ TP_PROTO(struct inode *inode, int ret),
+
+ TP_ARGS(inode, ret)
+);
+
+DEFINE_EVENT(f2fs__inode, f2fs_truncate,
+
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode)
+);
+
+TRACE_EVENT(f2fs_truncate_data_blocks_range,
+
+ TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs, int free),
+
+ TP_ARGS(inode, nid, ofs, free),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(nid_t, nid)
+ __field(unsigned int, ofs)
+ __field(int, free)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->nid = nid;
+ __entry->ofs = ofs;
+ __entry->free = free;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, nid = %u, offset = %u, freed = %d",
+ show_dev_ino(__entry),
+ (unsigned int)__entry->nid,
+ __entry->ofs,
+ __entry->free)
+);
+
+DECLARE_EVENT_CLASS(f2fs__truncate_op,
+
+ TP_PROTO(struct inode *inode, u64 from),
+
+ TP_ARGS(inode, from),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(loff_t, size)
+ __field(blkcnt_t, blocks)
+ __field(u64, from)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->size = inode->i_size;
+ __entry->blocks = inode->i_blocks;
+ __entry->from = from;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, i_size = %lld, i_blocks = %llu, "
+ "start file offset = %llu",
+ show_dev_ino(__entry),
+ __entry->size,
+ (unsigned long long)__entry->blocks,
+ (unsigned long long)__entry->from)
+);
+
+DEFINE_EVENT(f2fs__truncate_op, f2fs_truncate_blocks_enter,
+
+ TP_PROTO(struct inode *inode, u64 from),
+
+ TP_ARGS(inode, from)
+);
+
+DEFINE_EVENT(f2fs__inode_exit, f2fs_truncate_blocks_exit,
+
+ TP_PROTO(struct inode *inode, int ret),
+
+ TP_ARGS(inode, ret)
+);
+
+DEFINE_EVENT(f2fs__truncate_op, f2fs_truncate_inode_blocks_enter,
+
+ TP_PROTO(struct inode *inode, u64 from),
+
+ TP_ARGS(inode, from)
+);
+
+DEFINE_EVENT(f2fs__inode_exit, f2fs_truncate_inode_blocks_exit,
+
+ TP_PROTO(struct inode *inode, int ret),
+
+ TP_ARGS(inode, ret)
+);
+
+DECLARE_EVENT_CLASS(f2fs__truncate_node,
+
+ TP_PROTO(struct inode *inode, nid_t nid, block_t blk_addr),
+
+ TP_ARGS(inode, nid, blk_addr),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(nid_t, nid)
+ __field(block_t, blk_addr)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->nid = nid;
+ __entry->blk_addr = blk_addr;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, nid = %u, block_address = 0x%llx",
+ show_dev_ino(__entry),
+ (unsigned int)__entry->nid,
+ (unsigned long long)__entry->blk_addr)
+);
+
+DEFINE_EVENT(f2fs__truncate_node, f2fs_truncate_nodes_enter,
+
+ TP_PROTO(struct inode *inode, nid_t nid, block_t blk_addr),
+
+ TP_ARGS(inode, nid, blk_addr)
+);
+
+DEFINE_EVENT(f2fs__inode_exit, f2fs_truncate_nodes_exit,
+
+ TP_PROTO(struct inode *inode, int ret),
+
+ TP_ARGS(inode, ret)
+);
+
+DEFINE_EVENT(f2fs__truncate_node, f2fs_truncate_node,
+
+ TP_PROTO(struct inode *inode, nid_t nid, block_t blk_addr),
+
+ TP_ARGS(inode, nid, blk_addr)
+);
+
+TRACE_EVENT(f2fs_truncate_partial_nodes,
+
+ TP_PROTO(struct inode *inode, nid_t nid[], int depth, int err),
+
+ TP_ARGS(inode, nid, depth, err),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(nid_t, nid[3])
+ __field(int, depth)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->nid[0] = nid[0];
+ __entry->nid[1] = nid[1];
+ __entry->nid[2] = nid[2];
+ __entry->depth = depth;
+ __entry->err = err;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, "
+ "nid[0] = %u, nid[1] = %u, nid[2] = %u, depth = %d, err = %d",
+ show_dev_ino(__entry),
+ (unsigned int)__entry->nid[0],
+ (unsigned int)__entry->nid[1],
+ (unsigned int)__entry->nid[2],
+ __entry->depth,
+ __entry->err)
+);
+
+TRACE_EVENT_CONDITION(f2fs_readpage,
+
+ TP_PROTO(struct page *page, sector_t blkaddr, int type),
+
+ TP_ARGS(page, blkaddr, type),
+
+ TP_CONDITION(page->mapping),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(pgoff_t, index)
+ __field(sector_t, blkaddr)
+ __field(int, type)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = page->mapping->host->i_sb->s_dev;
+ __entry->ino = page->mapping->host->i_ino;
+ __entry->index = page->index;
+ __entry->blkaddr = blkaddr;
+ __entry->type = type;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
+ "blkaddr = 0x%llx, bio_type = %s",
+ show_dev_ino(__entry),
+ (unsigned long)__entry->index,
+ (unsigned long long)__entry->blkaddr,
+ show_bio_type(__entry->type))
+);
+
+TRACE_EVENT(f2fs_get_data_block,
+ TP_PROTO(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh, int ret),
+
+ TP_ARGS(inode, iblock, bh, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(sector_t, iblock)
+ __field(sector_t, bh_start)
+ __field(size_t, bh_size)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->iblock = iblock;
+ __entry->bh_start = bh->b_blocknr;
+ __entry->bh_size = bh->b_size;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, file offset = %llu, "
+ "start blkaddr = 0x%llx, len = 0x%llx bytes, err = %d",
+ show_dev_ino(__entry),
+ (unsigned long long)__entry->iblock,
+ (unsigned long long)__entry->bh_start,
+ (unsigned long long)__entry->bh_size,
+ __entry->ret)
+);
+
+TRACE_EVENT(f2fs_get_victim,
+
+ TP_PROTO(struct super_block *sb, int type, int gc_type,
+ struct victim_sel_policy *p, unsigned int pre_victim,
+ unsigned int prefree, unsigned int free),
+
+ TP_ARGS(sb, type, gc_type, p, pre_victim, prefree, free),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, type)
+ __field(int, gc_type)
+ __field(int, alloc_mode)
+ __field(int, gc_mode)
+ __field(unsigned int, victim)
+ __field(unsigned int, ofs_unit)
+ __field(unsigned int, pre_victim)
+ __field(unsigned int, prefree)
+ __field(unsigned int, free)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->type = type;
+ __entry->gc_type = gc_type;
+ __entry->alloc_mode = p->alloc_mode;
+ __entry->gc_mode = p->gc_mode;
+ __entry->victim = p->min_segno;
+ __entry->ofs_unit = p->ofs_unit;
+ __entry->pre_victim = pre_victim;
+ __entry->prefree = prefree;
+ __entry->free = free;
+ ),
+
+ TP_printk("dev = (%d,%d), type = %s, policy = (%s, %s, %s), victim = %u "
+ "ofs_unit = %u, pre_victim_secno = %d, prefree = %u, free = %u",
+ show_dev(__entry),
+ show_data_type(__entry->type),
+ show_gc_type(__entry->gc_type),
+ show_alloc_mode(__entry->alloc_mode),
+ show_victim_policy(__entry->gc_mode),
+ __entry->victim,
+ __entry->ofs_unit,
+ (int)__entry->pre_victim,
+ __entry->prefree,
+ __entry->free)
+);
+
+TRACE_EVENT(f2fs_fallocate,
+
+ TP_PROTO(struct inode *inode, int mode,
+ loff_t offset, loff_t len, int ret),
+
+ TP_ARGS(inode, mode, offset, len, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(int, mode)
+ __field(loff_t, offset)
+ __field(loff_t, len)
+ __field(loff_t, size)
+ __field(blkcnt_t, blocks)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->mode = mode;
+ __entry->offset = offset;
+ __entry->len = len;
+ __entry->size = inode->i_size;
+ __entry->blocks = inode->i_blocks;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, mode = %x, offset = %lld, "
+ "len = %lld, i_size = %lld, i_blocks = %llu, ret = %d",
+ show_dev_ino(__entry),
+ __entry->mode,
+ (unsigned long long)__entry->offset,
+ (unsigned long long)__entry->len,
+ (unsigned long long)__entry->size,
+ (unsigned long long)__entry->blocks,
+ __entry->ret)
+);
+
+TRACE_EVENT(f2fs_reserve_new_block,
+
+ TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node),
+
+ TP_ARGS(inode, nid, ofs_in_node),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(nid_t, nid)
+ __field(unsigned int, ofs_in_node)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->nid = nid;
+ __entry->ofs_in_node = ofs_in_node;
+ ),
+
+ TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u",
+ show_dev(__entry),
+ (unsigned int)__entry->nid,
+ __entry->ofs_in_node)
+);
+
+TRACE_EVENT(f2fs_do_submit_bio,
+
+ TP_PROTO(struct super_block *sb, int btype, bool sync, struct bio *bio),
+
+ TP_ARGS(sb, btype, sync, bio),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, btype)
+ __field(bool, sync)
+ __field(sector_t, sector)
+ __field(unsigned int, size)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->btype = btype;
+ __entry->sync = sync;
+ __entry->sector = bio->bi_sector;
+ __entry->size = bio->bi_size;
+ ),
+
+ TP_printk("dev = (%d,%d), type = %s, io = %s, sector = %lld, size = %u",
+ show_dev(__entry),
+ show_block_type(__entry->btype),
+ __entry->sync ? "sync" : "no sync",
+ (unsigned long long)__entry->sector,
+ __entry->size)
+);
+
+TRACE_EVENT(f2fs_submit_write_page,
+
+ TP_PROTO(struct page *page, block_t blk_addr, int type),
+
+ TP_ARGS(page, blk_addr, type),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(int, type)
+ __field(pgoff_t, index)
+ __field(block_t, block)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = page->mapping->host->i_sb->s_dev;
+ __entry->ino = page->mapping->host->i_ino;
+ __entry->type = type;
+ __entry->index = page->index;
+ __entry->block = blk_addr;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, %s, index = %lu, blkaddr = 0x%llx",
+ show_dev_ino(__entry),
+ show_block_type(__entry->type),
+ (unsigned long)__entry->index,
+ (unsigned long long)__entry->block)
+);
+
+TRACE_EVENT(f2fs_write_checkpoint,
+
+ TP_PROTO(struct super_block *sb, bool is_umount, char *msg),
+
+ TP_ARGS(sb, is_umount, msg),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(bool, is_umount)
+ __field(char *, msg)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->is_umount = is_umount;
+ __entry->msg = msg;
+ ),
+
+ TP_printk("dev = (%d,%d), checkpoint for %s, state = %s",
+ show_dev(__entry),
+ __entry->is_umount ? "clean umount" : "consistency",
+ __entry->msg)
+);
+
+#endif /* _TRACE_F2FS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
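
As with the bcache header, one f2fs source file defines the points with CREATE_TRACE_POINTS. A sketch of a conditional event call site (the wrapper function is hypothetical); note that TP_CONDITION(page->mapping) above makes the event a no-op for pages without a mapping, so callers need no extra check:

	#define CREATE_TRACE_POINTS
	#include <trace/events/f2fs.h>

	static void emit_readpage_event(struct page *page, sector_t blkaddr)
	{
		/* skipped internally when page->mapping is NULL */
		trace_f2fs_readpage(page, blkaddr, READ_SYNC);
	}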
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 6a16fd2e70ed..464ea82e10db 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -183,7 +183,6 @@ DECLARE_EVENT_CLASS(writeback_work_class,
DEFINE_EVENT(writeback_work_class, name, \
TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
TP_ARGS(bdi, work))
-DEFINE_WRITEBACK_WORK_EVENT(writeback_nothread);
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
@@ -222,12 +221,8 @@ DEFINE_EVENT(writeback_class, name, \
DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);
-DEFINE_WRITEBACK_EVENT(writeback_wake_thread);
-DEFINE_WRITEBACK_EVENT(writeback_wake_forker_thread);
DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
-DEFINE_WRITEBACK_EVENT(writeback_thread_start);
-DEFINE_WRITEBACK_EVENT(writeback_thread_stop);
DECLARE_EVENT_CLASS(wbc_class,
TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),