Diffstat (limited to 'include/linux')
171 files changed, 2767 insertions, 1423 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 807cbc46d73e..b7926bb9b444 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -587,7 +587,6 @@ static inline int acpi_subsys_freeze(struct device *dev) { return 0; } #if defined(CONFIG_ACPI) && defined(CONFIG_PM) struct acpi_device *acpi_dev_pm_get_node(struct device *dev); int acpi_dev_pm_attach(struct device *dev, bool power_on); -void acpi_dev_pm_detach(struct device *dev, bool power_off); #else static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev) { @@ -597,7 +596,6 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) { return -ENODEV; } -static inline void acpi_dev_pm_detach(struct device *dev, bool power_off) {} #endif #ifdef CONFIG_ACPI diff --git a/include/linux/aer.h b/include/linux/aer.h index c826d1c28f9c..4fef65e57023 100644 --- a/include/linux/aer.h +++ b/include/linux/aer.h @@ -7,6 +7,8 @@ #ifndef _AER_H_ #define _AER_H_ +#include <linux/types.h> + #define AER_NONFATAL 0 #define AER_FATAL 1 #define AER_CORRECTABLE 2 diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h index 09a947e8bc87..642d6ae4030c 100644 --- a/include/linux/ahci_platform.h +++ b/include/linux/ahci_platform.h @@ -22,19 +22,6 @@ struct ata_port_info; struct ahci_host_priv; struct platform_device; -/* - * Note ahci_platform_data is deprecated, it is only kept around for use - * by the old da850 and spear13xx ahci code. - * New drivers should instead declare their own platform_driver struct, and - * use ahci_platform* functions in their own probe, suspend and resume methods. - */ -struct ahci_platform_data { - int (*init)(struct device *dev, void __iomem *addr); - void (*exit)(struct device *dev); - int (*suspend)(struct device *dev); - int (*resume)(struct device *dev); -}; - int ahci_platform_enable_clks(struct ahci_host_priv *hpriv); void ahci_platform_disable_clks(struct ahci_host_priv *hpriv); int ahci_platform_enable_resources(struct ahci_host_priv *hpriv); diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h index b9fde17f767c..5c618a084225 100644 --- a/include/linux/ata_platform.h +++ b/include/linux/ata_platform.h @@ -8,11 +8,6 @@ struct pata_platform_info { * spacing used by ata_std_ports(). */ unsigned int ioport_shift; - /* - * Indicate platform specific irq types and initial - * IRQ flags when call request_irq() - */ - unsigned int irq_flags; }; extern int __pata_platform_probe(struct device *dev, diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h index a495a959e8a7..33eb274cd0e6 100644 --- a/include/linux/ath9k_platform.h +++ b/include/linux/ath9k_platform.h @@ -31,8 +31,11 @@ struct ath9k_platform_data { u32 gpio_mask; u32 gpio_val; + bool endian_check; bool is_clk_25mhz; bool tx_gain_buffalo; + bool disable_2ghz; + bool disable_5ghz; int (*get_mac_revision)(void); int (*external_reset)(void); diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h index 4c7a4b2104bf..91b77f8d495d 100644 --- a/include/linux/atmel-mci.h +++ b/include/linux/atmel-mci.h @@ -1,6 +1,8 @@ #ifndef __LINUX_ATMEL_MCI_H #define __LINUX_ATMEL_MCI_H +#include <linux/types.h> + #define ATMCI_MAX_NR_SLOTS 2 /** diff --git a/include/linux/atomic.h b/include/linux/atomic.h index fef3a809e7cf..5b08a8540ecf 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -3,42 +3,6 @@ #define _LINUX_ATOMIC_H #include <asm/atomic.h> -/* - * Provide __deprecated wrappers for the new interface, avoid flag day changes. 
- * We need the ugly external functions to break header recursion hell. - */ -#ifndef smp_mb__before_atomic_inc -static inline void __deprecated smp_mb__before_atomic_inc(void) -{ - extern void __smp_mb__before_atomic(void); - __smp_mb__before_atomic(); -} -#endif - -#ifndef smp_mb__after_atomic_inc -static inline void __deprecated smp_mb__after_atomic_inc(void) -{ - extern void __smp_mb__after_atomic(void); - __smp_mb__after_atomic(); -} -#endif - -#ifndef smp_mb__before_atomic_dec -static inline void __deprecated smp_mb__before_atomic_dec(void) -{ - extern void __smp_mb__before_atomic(void); - __smp_mb__before_atomic(); -} -#endif - -#ifndef smp_mb__after_atomic_dec -static inline void __deprecated smp_mb__after_atomic_dec(void) -{ - extern void __smp_mb__after_atomic(void); - __smp_mb__after_atomic(); -} -#endif - /** * atomic_add_unless - add unless the number is already a given value * @v: pointer of type atomic_t diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index e488e9459a93..5da6012b7a14 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -28,12 +28,10 @@ struct dentry; * Bits in backing_dev_info.state */ enum bdi_state { - BDI_wb_alloc, /* Default embedded wb allocated */ BDI_async_congested, /* The async (write) queue is getting full */ BDI_sync_congested, /* The sync queue is getting full */ BDI_registered, /* bdi_register() was done */ BDI_writeback_running, /* Writeback is in progress */ - BDI_unused, /* Available bits start here */ }; typedef int (congested_fn)(void *, int); @@ -50,7 +48,6 @@ enum bdi_stat_item { struct bdi_writeback { struct backing_dev_info *bdi; /* our parent bdi */ - unsigned int nr; unsigned long last_old_flush; /* last old data flush */ @@ -124,7 +121,6 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi); void bdi_writeback_workfn(struct work_struct *work); int bdi_has_dirty_io(struct backing_dev_info *bdi); void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi); -void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2); extern spinlock_t bdi_lock; extern struct list_head bdi_list; diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index 089743ade734..9b0a15d06a4f 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h @@ -27,10 +27,13 @@ * counter raised only while it is under our special handling; * * iii. after the lockless scan step have selected a potential balloon page for - * isolation, re-test the page->mapping flags and the page ref counter + * isolation, re-test the PageBalloon mark and the PagePrivate flag * under the proper page lock, to ensure isolating a valid balloon page * (not yet isolated, nor under release procedure) * + * iv. isolation or dequeueing procedure must clear PagePrivate flag under + * page lock together with removing page from balloon device page list. + * * The functions provided by this interface are placed to help on coping with * the aforementioned balloon page corner case, as well as to ensure the simple * set of exposed rules are satisfied while we are dealing with balloon pages @@ -54,43 +57,22 @@ * balloon driver as a page book-keeper for its registered balloon devices. 
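Illustrative usage for the atomic.h hunk above — a minimal sketch of moving a caller off the removed deprecated wrappers onto the generic barrier pair (example_pending and example_mark_pending are made-up names):

#include <linux/atomic.h>

static atomic_t example_pending = ATOMIC_INIT(0);

static void example_mark_pending(void)
{
	/* was: smp_mb__before_atomic_inc(); */
	smp_mb__before_atomic();
	atomic_inc(&example_pending);
	/* was: smp_mb__after_atomic_inc(); */
	smp_mb__after_atomic();
}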
*/ struct balloon_dev_info { - void *balloon_device; /* balloon device descriptor */ - struct address_space *mapping; /* balloon special page->mapping */ unsigned long isolated_pages; /* # of isolated pages for migration */ spinlock_t pages_lock; /* Protection to pages list */ struct list_head pages; /* Pages enqueued & handled to Host */ + int (*migratepage)(struct balloon_dev_info *, struct page *newpage, + struct page *page, enum migrate_mode mode); }; extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info); extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info); -extern struct balloon_dev_info *balloon_devinfo_alloc( - void *balloon_dev_descriptor); -static inline void balloon_devinfo_free(struct balloon_dev_info *b_dev_info) -{ - kfree(b_dev_info); -} - -/* - * balloon_page_free - release a balloon page back to the page free lists - * @page: ballooned page to be set free - * - * This function must be used to properly set free an isolated/dequeued balloon - * page at the end of a sucessful page migration, or at the balloon driver's - * page release procedure. - */ -static inline void balloon_page_free(struct page *page) +static inline void balloon_devinfo_init(struct balloon_dev_info *balloon) { - /* - * Balloon pages always get an extra refcount before being isolated - * and before being dequeued to help on sorting out fortuite colisions - * between a thread attempting to isolate and another thread attempting - * to release the very same balloon page. - * - * Before we handle the page back to Buddy, lets drop its extra refcnt. - */ - put_page(page); - __free_page(page); + balloon->isolated_pages = 0; + spin_lock_init(&balloon->pages_lock); + INIT_LIST_HEAD(&balloon->pages); + balloon->migratepage = NULL; } #ifdef CONFIG_BALLOON_COMPACTION @@ -98,107 +80,58 @@ extern bool balloon_page_isolate(struct page *page); extern void balloon_page_putback(struct page *page); extern int balloon_page_migrate(struct page *newpage, struct page *page, enum migrate_mode mode); -extern struct address_space -*balloon_mapping_alloc(struct balloon_dev_info *b_dev_info, - const struct address_space_operations *a_ops); - -static inline void balloon_mapping_free(struct address_space *balloon_mapping) -{ - kfree(balloon_mapping); -} /* - * page_flags_cleared - helper to perform balloon @page ->flags tests. - * - * As balloon pages are obtained from buddy and we do not play with page->flags - * at driver level (exception made when we get the page lock for compaction), - * we can safely identify a ballooned page by checking if the - * PAGE_FLAGS_CHECK_AT_PREP page->flags are all cleared. This approach also - * helps us skip ballooned pages that are locked for compaction or release, thus - * mitigating their racy check at balloon_page_movable() - */ -static inline bool page_flags_cleared(struct page *page) -{ - return !(page->flags & PAGE_FLAGS_CHECK_AT_PREP); -} - -/* - * __is_movable_balloon_page - helper to perform @page mapping->flags tests + * __is_movable_balloon_page - helper to perform @page PageBalloon tests */ static inline bool __is_movable_balloon_page(struct page *page) { - struct address_space *mapping = page->mapping; - return mapping_balloon(mapping); + return PageBalloon(page); } /* - * balloon_page_movable - test page->mapping->flags to identify balloon pages - * that can be moved by compaction/migration. 
- * - * This function is used at core compaction's page isolation scheme, therefore - * most pages exposed to it are not enlisted as balloon pages and so, to avoid - * undesired side effects like racing against __free_pages(), we cannot afford - * holding the page locked while testing page->mapping->flags here. + * balloon_page_movable - test PageBalloon to identify balloon pages + * and PagePrivate to check that the page is not + * isolated and can be moved by compaction/migration. * * As we might return false positives in the case of a balloon page being just - * released under us, the page->mapping->flags need to be re-tested later, - * under the proper page lock, at the functions that will be coping with the - * balloon page case. + * released under us, this need to be re-tested later, under the page lock. */ static inline bool balloon_page_movable(struct page *page) { - /* - * Before dereferencing and testing mapping->flags, let's make sure - * this is not a page that uses ->mapping in a different way - */ - if (page_flags_cleared(page) && !page_mapped(page) && - page_count(page) == 1) - return __is_movable_balloon_page(page); - - return false; + return PageBalloon(page) && PagePrivate(page); } /* * isolated_balloon_page - identify an isolated balloon page on private * compaction/migration page lists. - * - * After a compaction thread isolates a balloon page for migration, it raises - * the page refcount to prevent concurrent compaction threads from re-isolating - * the same page. For that reason putback_movable_pages(), or other routines - * that need to identify isolated balloon pages on private pagelists, cannot - * rely on balloon_page_movable() to accomplish the task. */ static inline bool isolated_balloon_page(struct page *page) { - /* Already isolated balloon pages, by default, have a raised refcount */ - if (page_flags_cleared(page) && !page_mapped(page) && - page_count(page) >= 2) - return __is_movable_balloon_page(page); - - return false; + return PageBalloon(page); } /* * balloon_page_insert - insert a page into the balloon's page list and make - * the page->mapping assignment accordingly. + * the page->private assignment accordingly. + * @balloon : pointer to balloon device * @page : page to be assigned as a 'balloon page' - * @mapping : allocated special 'balloon_mapping' - * @head : balloon's device page list head * * Caller must ensure the page is locked and the spin_lock protecting balloon * pages list is held before inserting a page into the balloon device. */ -static inline void balloon_page_insert(struct page *page, - struct address_space *mapping, - struct list_head *head) +static inline void balloon_page_insert(struct balloon_dev_info *balloon, + struct page *page) { - page->mapping = mapping; - list_add(&page->lru, head); + __SetPageBalloon(page); + SetPagePrivate(page); + set_page_private(page, (unsigned long)balloon); + list_add(&page->lru, &balloon->pages); } /* * balloon_page_delete - delete a page from balloon's page list and clear - * the page->mapping assignement accordingly. + * the page->private assignement accordingly. 
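Driver-side sketch for the reworked balloon_compaction API above, assuming a hypothetical balloon driver (the my_balloon* names are illustrative); it only shows balloon_devinfo_init() plus the new migratepage callback:

#include <linux/balloon_compaction.h>
#include <linux/migrate.h>

static struct balloon_dev_info my_balloon;

static int my_balloon_migratepage(struct balloon_dev_info *info,
				  struct page *newpage, struct page *page,
				  enum migrate_mode mode)
{
	/* copy any device-specific state from @page to @newpage here */
	return MIGRATEPAGE_SUCCESS;
}

static void my_balloon_setup(void)
{
	balloon_devinfo_init(&my_balloon);
	my_balloon.migratepage = my_balloon_migratepage;
}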
* @page : page to be released from balloon's page list * * Caller must ensure the page is locked and the spin_lock protecting balloon @@ -206,8 +139,12 @@ static inline void balloon_page_insert(struct page *page, */ static inline void balloon_page_delete(struct page *page) { - page->mapping = NULL; - list_del(&page->lru); + __ClearPageBalloon(page); + set_page_private(page, 0); + if (PagePrivate(page)) { + ClearPagePrivate(page); + list_del(&page->lru); + } } /* @@ -216,11 +153,7 @@ static inline void balloon_page_delete(struct page *page) */ static inline struct balloon_dev_info *balloon_page_device(struct page *page) { - struct address_space *mapping = page->mapping; - if (likely(mapping)) - return mapping->private_data; - - return NULL; + return (struct balloon_dev_info *)page_private(page); } static inline gfp_t balloon_mapping_gfp_mask(void) @@ -228,34 +161,24 @@ static inline gfp_t balloon_mapping_gfp_mask(void) return GFP_HIGHUSER_MOVABLE; } -static inline bool balloon_compaction_check(void) -{ - return true; -} - #else /* !CONFIG_BALLOON_COMPACTION */ -static inline void *balloon_mapping_alloc(void *balloon_device, - const struct address_space_operations *a_ops) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -static inline void balloon_mapping_free(struct address_space *balloon_mapping) +static inline void balloon_page_insert(struct balloon_dev_info *balloon, + struct page *page) { - return; + __SetPageBalloon(page); + list_add(&page->lru, &balloon->pages); } -static inline void balloon_page_insert(struct page *page, - struct address_space *mapping, - struct list_head *head) +static inline void balloon_page_delete(struct page *page) { - list_add(&page->lru, head); + __ClearPageBalloon(page); + list_del(&page->lru); } -static inline void balloon_page_delete(struct page *page) +static inline bool __is_movable_balloon_page(struct page *page) { - list_del(&page->lru); + return false; } static inline bool balloon_page_movable(struct page *page) @@ -289,9 +212,5 @@ static inline gfp_t balloon_mapping_gfp_mask(void) return GFP_HIGHUSER; } -static inline bool balloon_compaction_check(void) -{ - return false; -} #endif /* CONFIG_BALLOON_COMPACTION */ #endif /* _LINUX_BALLOON_COMPACTION_H */ diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 729f48e6b20b..eb1c6a47b67f 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -447,4 +447,6 @@ extern u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset); #define BCMA_DMA_TRANSLATION_DMA64_CMT 0x80000000 /* Client Mode Translation for 64-bit DMA */ extern u32 bcma_core_dma_translation(struct bcma_device *core); +extern unsigned int bcma_core_irq(struct bcma_device *core, int num); + #endif /* LINUX_BCMA_H_ */ diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h index fb61f3fb4ddb..0b3b32aeeb8a 100644 --- a/include/linux/bcma/bcma_driver_mips.h +++ b/include/linux/bcma/bcma_driver_mips.h @@ -43,12 +43,12 @@ struct bcma_drv_mips { extern void bcma_core_mips_init(struct bcma_drv_mips *mcore); extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore); -extern unsigned int bcma_core_irq(struct bcma_device *core); +extern unsigned int bcma_core_mips_irq(struct bcma_device *dev); #else static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { } static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { } -static inline unsigned int bcma_core_irq(struct bcma_device *core) +static inline unsigned int bcma_core_mips_irq(struct bcma_device 
*dev) { return 0; } diff --git a/include/linux/bio.h b/include/linux/bio.h index b39e5000ff58..7347f486ceca 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -292,7 +292,24 @@ static inline unsigned bio_segments(struct bio *bio) */ #define bio_get(bio) atomic_inc(&(bio)->bi_cnt) +enum bip_flags { + BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */ + BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */ + BIP_CTRL_NOCHECK = 1 << 2, /* disable HBA integrity checking */ + BIP_DISK_NOCHECK = 1 << 3, /* disable disk integrity checking */ + BIP_IP_CHECKSUM = 1 << 4, /* IP checksum */ +}; + #if defined(CONFIG_BLK_DEV_INTEGRITY) + +static inline struct bio_integrity_payload *bio_integrity(struct bio *bio) +{ + if (bio->bi_rw & REQ_INTEGRITY) + return bio->bi_integrity; + + return NULL; +} + /* * bio integrity payload */ @@ -301,21 +318,40 @@ struct bio_integrity_payload { struct bvec_iter bip_iter; - /* kill - should just use bip_vec */ - void *bip_buf; /* generated integrity data */ - bio_end_io_t *bip_end_io; /* saved I/O completion fn */ unsigned short bip_slab; /* slab the bip came from */ unsigned short bip_vcnt; /* # of integrity bio_vecs */ unsigned short bip_max_vcnt; /* integrity bio_vec slots */ - unsigned bip_owns_buf:1; /* should free bip_buf */ + unsigned short bip_flags; /* control flags */ struct work_struct bip_work; /* I/O completion */ struct bio_vec *bip_vec; struct bio_vec bip_inline_vecs[0];/* embedded bvec array */ }; + +static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag) +{ + struct bio_integrity_payload *bip = bio_integrity(bio); + + if (bip) + return bip->bip_flags & flag; + + return false; +} + +static inline sector_t bip_get_seed(struct bio_integrity_payload *bip) +{ + return bip->bip_iter.bi_sector; +} + +static inline void bip_set_seed(struct bio_integrity_payload *bip, + sector_t seed) +{ + bip->bip_iter.bi_sector = seed; +} + #endif /* CONFIG_BLK_DEV_INTEGRITY */ extern void bio_trim(struct bio *bio, int offset, int size); @@ -342,6 +378,7 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors, } extern struct bio_set *bioset_create(unsigned int, unsigned int); +extern struct bio_set *bioset_create_nobvec(unsigned int, unsigned int); extern void bioset_free(struct bio_set *); extern mempool_t *biovec_create_pool(int pool_entries); @@ -353,7 +390,6 @@ extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *); extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs); extern struct bio_set *fs_bio_set; -unsigned int bio_integrity_tag_size(struct bio *bio); static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) { @@ -661,14 +697,10 @@ struct biovec_slab { for_each_bio(_bio) \ bip_for_each_vec(_bvl, _bio->bi_integrity, _iter) -#define bio_integrity(bio) (bio->bi_integrity != NULL) - extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int); extern void bio_integrity_free(struct bio *); extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int); -extern int bio_integrity_enabled(struct bio *bio); -extern int bio_integrity_set_tag(struct bio *, void *, unsigned int); -extern int bio_integrity_get_tag(struct bio *, void *, unsigned int); +extern bool bio_integrity_enabled(struct bio *bio); extern int bio_integrity_prep(struct bio *); extern void bio_integrity_endio(struct bio *, int); extern void bio_integrity_advance(struct bio *, unsigned int); @@ -680,14 +712,14 @@ 
extern void bio_integrity_init(void); #else /* CONFIG_BLK_DEV_INTEGRITY */ -static inline int bio_integrity(struct bio *bio) +static inline void *bio_integrity(struct bio *bio) { - return 0; + return NULL; } -static inline int bio_integrity_enabled(struct bio *bio) +static inline bool bio_integrity_enabled(struct bio *bio) { - return 0; + return false; } static inline int bioset_integrity_create(struct bio_set *bs, int pool_size) @@ -733,6 +765,11 @@ static inline void bio_integrity_init(void) return; } +static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag) +{ + return false; +} + #endif /* CONFIG_BLK_DEV_INTEGRITY */ #endif /* CONFIG_BLOCK */ diff --git a/include/linux/bitops.h b/include/linux/bitops.h index cbc5833fb221..be5fd38bd5a0 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -32,26 +32,6 @@ extern unsigned long __sw_hweight64(__u64 w); */ #include <asm/bitops.h> -/* - * Provide __deprecated wrappers for the new interface, avoid flag day changes. - * We need the ugly external functions to break header recursion hell. - */ -#ifndef smp_mb__before_clear_bit -static inline void __deprecated smp_mb__before_clear_bit(void) -{ - extern void __smp_mb__before_atomic(void); - __smp_mb__before_atomic(); -} -#endif - -#ifndef smp_mb__after_clear_bit -static inline void __deprecated smp_mb__after_clear_bit(void) -{ - extern void __smp_mb__after_atomic(void); - __smp_mb__after_atomic(); -} -#endif - #define for_each_set_bit(bit, addr, size) \ for ((bit) = find_first_bit((addr), (size)); \ (bit) < (size); \ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index a1e31f274fcd..c9be1589415a 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -4,6 +4,7 @@ #include <linux/blkdev.h> struct blk_mq_tags; +struct blk_flush_queue; struct blk_mq_cpu_notifier { struct list_head list; @@ -34,6 +35,7 @@ struct blk_mq_hw_ctx { struct request_queue *queue; unsigned int queue_num; + struct blk_flush_queue *fq; void *driver_data; @@ -77,8 +79,9 @@ struct blk_mq_tag_set { struct list_head tag_list; }; -typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *); +typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *, bool); typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int); +typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool); typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); typedef int (init_request_fn)(void *, struct request *, unsigned int, @@ -86,6 +89,9 @@ typedef int (init_request_fn)(void *, struct request *, unsigned int, typedef void (exit_request_fn)(void *, struct request *, unsigned int, unsigned int); +typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *, + bool); + struct blk_mq_ops { /* * Queue request @@ -100,7 +106,7 @@ struct blk_mq_ops { /* * Called on request timeout */ - rq_timed_out_fn *timeout; + timeout_fn *timeout; softirq_done_fn *complete; @@ -115,6 +121,10 @@ struct blk_mq_ops { /* * Called for every command allocated by the block layer to allow * the driver to set up driver specific data. + * + * Tag greater than or equal to queue_depth is for setting up + * flush request. + * * Ditto for exit/teardown. 
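Minimal sketch of the new bio integrity accessors from the bio.h hunk above, assuming CONFIG_BLK_DEV_INTEGRITY (example_integrity_seed is a made-up helper):

#include <linux/bio.h>

static sector_t example_integrity_seed(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	/* bio_integrity() is NULL unless REQ_INTEGRITY is set on the bio */
	if (!bip || bio_integrity_flagged(bio, BIP_CTRL_NOCHECK))
		return 0;

	return bip_get_seed(bip);
}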
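Sketch of the updated blk-mq driver callbacks in this diff, assuming a hypothetical driver (the example_* names are illustrative; BLK_MQ_RQ_QUEUE_OK and BLK_EH_RESET_TIMER come from the existing block headers): queue_rq() now takes a "last" hint, the timeout handler takes a "reserved" flag, and completion goes through blk_mq_start_request()/blk_mq_end_request():

#include <linux/blk-mq.h>

static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
			    struct request *rq, bool last)
{
	blk_mq_start_request(rq);
	/* ... hand the request to hardware here ... */
	blk_mq_end_request(rq, 0);
	return BLK_MQ_RQ_QUEUE_OK;
}

static enum blk_eh_timer_return example_timeout(struct request *rq,
						bool reserved)
{
	return BLK_EH_RESET_TIMER;
}

static struct blk_mq_ops example_mq_ops = {
	.queue_rq	= example_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.timeout	= example_timeout,
};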
*/ init_request_fn *init_request; @@ -140,6 +150,7 @@ enum { }; struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); +void blk_mq_finish_init(struct request_queue *q); int blk_mq_register_disk(struct gendisk *); void blk_mq_unregister_disk(struct gendisk *); @@ -159,8 +170,9 @@ struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); -void blk_mq_end_io(struct request *rq, int error); -void __blk_mq_end_io(struct request *rq, int error); +void blk_mq_start_request(struct request *rq); +void blk_mq_end_request(struct request *rq, int error); +void __blk_mq_end_request(struct request *rq, int error); void blk_mq_requeue_request(struct request *rq); void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); @@ -173,7 +185,8 @@ void blk_mq_stop_hw_queues(struct request_queue *q); void blk_mq_start_hw_queues(struct request_queue *q); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); -void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data); +void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, + void *priv); /* * Driver command data is immediately after the request. So subtract request diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 66c2167f04a9..445d59231bc4 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -78,9 +78,11 @@ struct bio { struct io_context *bi_ioc; struct cgroup_subsys_state *bi_css; #endif + union { #if defined(CONFIG_BLK_DEV_INTEGRITY) - struct bio_integrity_payload *bi_integrity; /* data integrity */ + struct bio_integrity_payload *bi_integrity; /* data integrity */ #endif + }; unsigned short bi_vcnt; /* how many bio_vec's */ @@ -118,10 +120,8 @@ struct bio { #define BIO_USER_MAPPED 6 /* contains user pages */ #define BIO_EOPNOTSUPP 7 /* not supported */ #define BIO_NULL_MAPPED 8 /* contains invalid user pages */ -#define BIO_FS_INTEGRITY 9 /* fs owns integrity data, not block layer */ -#define BIO_QUIET 10 /* Make BIO Quiet */ -#define BIO_MAPPED_INTEGRITY 11/* integrity metadata has been remapped */ -#define BIO_SNAP_STABLE 12 /* bio data must be snapshotted during write */ +#define BIO_QUIET 9 /* Make BIO Quiet */ +#define BIO_SNAP_STABLE 10 /* bio data must be snapshotted during write */ /* * Flags starting here get preserved by bio_reset() - this includes @@ -162,6 +162,7 @@ enum rq_flag_bits { __REQ_WRITE_SAME, /* write same block many times */ __REQ_NOIDLE, /* don't anticipate more IO after this one */ + __REQ_INTEGRITY, /* I/O includes block integrity payload */ __REQ_FUA, /* forced unit access */ __REQ_FLUSH, /* request for cache flush */ @@ -186,9 +187,7 @@ enum rq_flag_bits { __REQ_FLUSH_SEQ, /* request for flush sequence */ __REQ_IO_STAT, /* account I/O stat */ __REQ_MIXED_MERGE, /* merge of different types, fail separately */ - __REQ_KERNEL, /* direct IO to kernel pages */ __REQ_PM, /* runtime pm request */ - __REQ_END, /* last of chain of requests */ __REQ_HASHED, /* on IO scheduler merge hash */ __REQ_MQ_INFLIGHT, /* track inflight for MQ */ __REQ_NR_BITS, /* stops here */ @@ -204,13 +203,14 @@ enum rq_flag_bits { #define REQ_DISCARD (1ULL << __REQ_DISCARD) #define REQ_WRITE_SAME (1ULL << __REQ_WRITE_SAME) #define REQ_NOIDLE (1ULL << 
__REQ_NOIDLE) +#define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY) #define REQ_FAILFAST_MASK \ (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) #define REQ_COMMON_MASK \ (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \ REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \ - REQ_SECURE) + REQ_SECURE | REQ_INTEGRITY) #define REQ_CLONE_MASK REQ_COMMON_MASK #define BIO_NO_ADVANCE_ITER_MASK (REQ_DISCARD|REQ_WRITE_SAME) @@ -240,9 +240,7 @@ enum rq_flag_bits { #define REQ_IO_STAT (1ULL << __REQ_IO_STAT) #define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE) #define REQ_SECURE (1ULL << __REQ_SECURE) -#define REQ_KERNEL (1ULL << __REQ_KERNEL) #define REQ_PM (1ULL << __REQ_PM) -#define REQ_END (1ULL << __REQ_END) #define REQ_HASHED (1ULL << __REQ_HASHED) #define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 518b46555b80..0207a78a8d82 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -36,6 +36,7 @@ struct request; struct sg_io_hdr; struct bsg_job; struct blkcg_gq; +struct blk_flush_queue; #define BLKDEV_MIN_RQ 4 #define BLKDEV_MAX_RQ 128 /* Default maximum */ @@ -455,14 +456,7 @@ struct request_queue { */ unsigned int flush_flags; unsigned int flush_not_queueable:1; - unsigned int flush_queue_delayed:1; - unsigned int flush_pending_idx:1; - unsigned int flush_running_idx:1; - unsigned long flush_pending_since; - struct list_head flush_queue[2]; - struct list_head flush_data_in_flight; - struct request *flush_rq; - spinlock_t mq_flush_lock; + struct blk_flush_queue *fq; struct list_head requeue_list; spinlock_t requeue_lock; @@ -865,7 +859,7 @@ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, static inline struct request_queue *bdev_get_queue(struct block_device *bdev) { - return bdev->bd_disk->queue; + return bdev->bd_disk->queue; /* this is never NULL */ } /* @@ -1285,10 +1279,9 @@ static inline int queue_alignment_offset(struct request_queue *q) static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) { unsigned int granularity = max(lim->physical_block_size, lim->io_min); - unsigned int alignment = (sector << 9) & (granularity - 1); + unsigned int alignment = sector_div(sector, granularity >> 9) << 9; - return (granularity + lim->alignment_offset - alignment) - & (granularity - 1); + return (granularity + lim->alignment_offset - alignment) % granularity; } static inline int bdev_alignment_offset(struct block_device *bdev) @@ -1464,32 +1457,31 @@ static inline uint64_t rq_io_start_time_ns(struct request *req) #if defined(CONFIG_BLK_DEV_INTEGRITY) -#define INTEGRITY_FLAG_READ 2 /* verify data integrity on read */ -#define INTEGRITY_FLAG_WRITE 4 /* generate data integrity on write */ +enum blk_integrity_flags { + BLK_INTEGRITY_VERIFY = 1 << 0, + BLK_INTEGRITY_GENERATE = 1 << 1, + BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2, + BLK_INTEGRITY_IP_CHECKSUM = 1 << 3, +}; -struct blk_integrity_exchg { +struct blk_integrity_iter { void *prot_buf; void *data_buf; - sector_t sector; + sector_t seed; unsigned int data_size; - unsigned short sector_size; + unsigned short interval; const char *disk_name; }; -typedef void (integrity_gen_fn) (struct blk_integrity_exchg *); -typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *); -typedef void (integrity_set_tag_fn) (void *, void *, unsigned int); -typedef void (integrity_get_tag_fn) (void *, void *, unsigned int); +typedef int (integrity_processing_fn) (struct blk_integrity_iter 
*); struct blk_integrity { - integrity_gen_fn *generate_fn; - integrity_vrfy_fn *verify_fn; - integrity_set_tag_fn *set_tag_fn; - integrity_get_tag_fn *get_tag_fn; + integrity_processing_fn *generate_fn; + integrity_processing_fn *verify_fn; unsigned short flags; unsigned short tuple_size; - unsigned short sector_size; + unsigned short interval; unsigned short tag_size; const char *name; @@ -1504,10 +1496,10 @@ extern int blk_integrity_compare(struct gendisk *, struct gendisk *); extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, struct scatterlist *); extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); -extern int blk_integrity_merge_rq(struct request_queue *, struct request *, - struct request *); -extern int blk_integrity_merge_bio(struct request_queue *, struct request *, - struct bio *); +extern bool blk_integrity_merge_rq(struct request_queue *, struct request *, + struct request *); +extern bool blk_integrity_merge_bio(struct request_queue *, struct request *, + struct bio *); static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev) @@ -1520,12 +1512,9 @@ static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) return disk->integrity; } -static inline int blk_integrity_rq(struct request *rq) +static inline bool blk_integrity_rq(struct request *rq) { - if (rq->bio == NULL) - return 0; - - return bio_integrity(rq->bio); + return rq->cmd_flags & REQ_INTEGRITY; } static inline void blk_queue_max_integrity_segments(struct request_queue *q, @@ -1564,7 +1553,7 @@ static inline int blk_rq_map_integrity_sg(struct request_queue *q, } static inline struct blk_integrity *bdev_get_integrity(struct block_device *b) { - return 0; + return NULL; } static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) { @@ -1590,15 +1579,15 @@ static inline unsigned short queue_max_integrity_segments(struct request_queue * { return 0; } -static inline int blk_integrity_merge_rq(struct request_queue *rq, - struct request *r1, - struct request *r2) +static inline bool blk_integrity_merge_rq(struct request_queue *rq, + struct request *r1, + struct request *r2) { return 0; } -static inline int blk_integrity_merge_bio(struct request_queue *rq, - struct request *r, - struct bio *b) +static inline bool blk_integrity_merge_bio(struct request_queue *rq, + struct request *r, + struct bio *b) { return 0; } diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 279b0afac1c1..07bc359b88ac 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -211,7 +211,6 @@ extern struct page **ceph_get_direct_page_vector(const void __user *data, bool write_page); extern void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty); -extern void ceph_release_page_vector(struct page **pages, int num_pages); extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags); extern int ceph_copy_user_to_page_vector(struct page **pages, const void __user *data, diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h index 9660d6b0a35d..5f871d84ddce 100644 --- a/include/linux/ceph/pagelist.h +++ b/include/linux/ceph/pagelist.h @@ -2,6 +2,7 @@ #define __FS_CEPH_PAGELIST_H #include <linux/list.h> +#include <linux/atomic.h> struct ceph_pagelist { struct list_head head; @@ -10,6 +11,7 @@ struct ceph_pagelist { size_t room; struct list_head free_list; size_t num_pages_free; + atomic_t refcnt; }; struct ceph_pagelist_cursor { @@ -26,9 +28,10 @@ static inline 
void ceph_pagelist_init(struct ceph_pagelist *pl) pl->room = 0; INIT_LIST_HEAD(&pl->free_list); pl->num_pages_free = 0; + atomic_set(&pl->refcnt, 1); } -extern int ceph_pagelist_release(struct ceph_pagelist *pl); +extern void ceph_pagelist_release(struct ceph_pagelist *pl); extern int ceph_pagelist_append(struct ceph_pagelist *pl, const void *d, size_t l); diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h index f20e0d8a2155..2f822dca1046 100644 --- a/include/linux/ceph/rados.h +++ b/include/linux/ceph/rados.h @@ -172,6 +172,7 @@ extern const char *ceph_osd_state_name(int s); #define CEPH_OSD_OP_MODE_WR 0x2000 #define CEPH_OSD_OP_MODE_RMW 0x3000 #define CEPH_OSD_OP_MODE_SUB 0x4000 +#define CEPH_OSD_OP_MODE_CACHE 0x8000 #define CEPH_OSD_OP_TYPE 0x0f00 #define CEPH_OSD_OP_TYPE_LOCK 0x0100 @@ -181,103 +182,135 @@ extern const char *ceph_osd_state_name(int s); #define CEPH_OSD_OP_TYPE_PG 0x0500 #define CEPH_OSD_OP_TYPE_MULTI 0x0600 /* multiobject */ +#define __CEPH_OSD_OP1(mode, nr) \ + (CEPH_OSD_OP_MODE_##mode | (nr)) + +#define __CEPH_OSD_OP(mode, type, nr) \ + (CEPH_OSD_OP_MODE_##mode | CEPH_OSD_OP_TYPE_##type | (nr)) + +#define __CEPH_FORALL_OSD_OPS(f) \ + /** data **/ \ + /* read */ \ + f(READ, __CEPH_OSD_OP(RD, DATA, 1), "read") \ + f(STAT, __CEPH_OSD_OP(RD, DATA, 2), "stat") \ + f(MAPEXT, __CEPH_OSD_OP(RD, DATA, 3), "mapext") \ + \ + /* fancy read */ \ + f(MASKTRUNC, __CEPH_OSD_OP(RD, DATA, 4), "masktrunc") \ + f(SPARSE_READ, __CEPH_OSD_OP(RD, DATA, 5), "sparse-read") \ + \ + f(NOTIFY, __CEPH_OSD_OP(RD, DATA, 6), "notify") \ + f(NOTIFY_ACK, __CEPH_OSD_OP(RD, DATA, 7), "notify-ack") \ + \ + /* versioning */ \ + f(ASSERT_VER, __CEPH_OSD_OP(RD, DATA, 8), "assert-version") \ + \ + f(LIST_WATCHERS, __CEPH_OSD_OP(RD, DATA, 9), "list-watchers") \ + \ + f(LIST_SNAPS, __CEPH_OSD_OP(RD, DATA, 10), "list-snaps") \ + \ + /* sync */ \ + f(SYNC_READ, __CEPH_OSD_OP(RD, DATA, 11), "sync_read") \ + \ + /* write */ \ + f(WRITE, __CEPH_OSD_OP(WR, DATA, 1), "write") \ + f(WRITEFULL, __CEPH_OSD_OP(WR, DATA, 2), "writefull") \ + f(TRUNCATE, __CEPH_OSD_OP(WR, DATA, 3), "truncate") \ + f(ZERO, __CEPH_OSD_OP(WR, DATA, 4), "zero") \ + f(DELETE, __CEPH_OSD_OP(WR, DATA, 5), "delete") \ + \ + /* fancy write */ \ + f(APPEND, __CEPH_OSD_OP(WR, DATA, 6), "append") \ + f(STARTSYNC, __CEPH_OSD_OP(WR, DATA, 7), "startsync") \ + f(SETTRUNC, __CEPH_OSD_OP(WR, DATA, 8), "settrunc") \ + f(TRIMTRUNC, __CEPH_OSD_OP(WR, DATA, 9), "trimtrunc") \ + \ + f(TMAPUP, __CEPH_OSD_OP(RMW, DATA, 10), "tmapup") \ + f(TMAPPUT, __CEPH_OSD_OP(WR, DATA, 11), "tmapput") \ + f(TMAPGET, __CEPH_OSD_OP(RD, DATA, 12), "tmapget") \ + \ + f(CREATE, __CEPH_OSD_OP(WR, DATA, 13), "create") \ + f(ROLLBACK, __CEPH_OSD_OP(WR, DATA, 14), "rollback") \ + \ + f(WATCH, __CEPH_OSD_OP(WR, DATA, 15), "watch") \ + \ + /* omap */ \ + f(OMAPGETKEYS, __CEPH_OSD_OP(RD, DATA, 17), "omap-get-keys") \ + f(OMAPGETVALS, __CEPH_OSD_OP(RD, DATA, 18), "omap-get-vals") \ + f(OMAPGETHEADER, __CEPH_OSD_OP(RD, DATA, 19), "omap-get-header") \ + f(OMAPGETVALSBYKEYS, __CEPH_OSD_OP(RD, DATA, 20), "omap-get-vals-by-keys") \ + f(OMAPSETVALS, __CEPH_OSD_OP(WR, DATA, 21), "omap-set-vals") \ + f(OMAPSETHEADER, __CEPH_OSD_OP(WR, DATA, 22), "omap-set-header") \ + f(OMAPCLEAR, __CEPH_OSD_OP(WR, DATA, 23), "omap-clear") \ + f(OMAPRMKEYS, __CEPH_OSD_OP(WR, DATA, 24), "omap-rm-keys") \ + f(OMAP_CMP, __CEPH_OSD_OP(RD, DATA, 25), "omap-cmp") \ + \ + /* tiering */ \ + f(COPY_FROM, __CEPH_OSD_OP(WR, DATA, 26), "copy-from") \ + f(COPY_GET_CLASSIC, __CEPH_OSD_OP(RD, DATA, 27), 
"copy-get-classic") \ + f(UNDIRTY, __CEPH_OSD_OP(WR, DATA, 28), "undirty") \ + f(ISDIRTY, __CEPH_OSD_OP(RD, DATA, 29), "isdirty") \ + f(COPY_GET, __CEPH_OSD_OP(RD, DATA, 30), "copy-get") \ + f(CACHE_FLUSH, __CEPH_OSD_OP(CACHE, DATA, 31), "cache-flush") \ + f(CACHE_EVICT, __CEPH_OSD_OP(CACHE, DATA, 32), "cache-evict") \ + f(CACHE_TRY_FLUSH, __CEPH_OSD_OP(CACHE, DATA, 33), "cache-try-flush") \ + \ + /* convert tmap to omap */ \ + f(TMAP2OMAP, __CEPH_OSD_OP(RMW, DATA, 34), "tmap2omap") \ + \ + /* hints */ \ + f(SETALLOCHINT, __CEPH_OSD_OP(WR, DATA, 35), "set-alloc-hint") \ + \ + /** multi **/ \ + f(CLONERANGE, __CEPH_OSD_OP(WR, MULTI, 1), "clonerange") \ + f(ASSERT_SRC_VERSION, __CEPH_OSD_OP(RD, MULTI, 2), "assert-src-version") \ + f(SRC_CMPXATTR, __CEPH_OSD_OP(RD, MULTI, 3), "src-cmpxattr") \ + \ + /** attrs **/ \ + /* read */ \ + f(GETXATTR, __CEPH_OSD_OP(RD, ATTR, 1), "getxattr") \ + f(GETXATTRS, __CEPH_OSD_OP(RD, ATTR, 2), "getxattrs") \ + f(CMPXATTR, __CEPH_OSD_OP(RD, ATTR, 3), "cmpxattr") \ + \ + /* write */ \ + f(SETXATTR, __CEPH_OSD_OP(WR, ATTR, 1), "setxattr") \ + f(SETXATTRS, __CEPH_OSD_OP(WR, ATTR, 2), "setxattrs") \ + f(RESETXATTRS, __CEPH_OSD_OP(WR, ATTR, 3), "resetxattrs") \ + f(RMXATTR, __CEPH_OSD_OP(WR, ATTR, 4), "rmxattr") \ + \ + /** subop **/ \ + f(PULL, __CEPH_OSD_OP1(SUB, 1), "pull") \ + f(PUSH, __CEPH_OSD_OP1(SUB, 2), "push") \ + f(BALANCEREADS, __CEPH_OSD_OP1(SUB, 3), "balance-reads") \ + f(UNBALANCEREADS, __CEPH_OSD_OP1(SUB, 4), "unbalance-reads") \ + f(SCRUB, __CEPH_OSD_OP1(SUB, 5), "scrub") \ + f(SCRUB_RESERVE, __CEPH_OSD_OP1(SUB, 6), "scrub-reserve") \ + f(SCRUB_UNRESERVE, __CEPH_OSD_OP1(SUB, 7), "scrub-unreserve") \ + f(SCRUB_STOP, __CEPH_OSD_OP1(SUB, 8), "scrub-stop") \ + f(SCRUB_MAP, __CEPH_OSD_OP1(SUB, 9), "scrub-map") \ + \ + /** lock **/ \ + f(WRLOCK, __CEPH_OSD_OP(WR, LOCK, 1), "wrlock") \ + f(WRUNLOCK, __CEPH_OSD_OP(WR, LOCK, 2), "wrunlock") \ + f(RDLOCK, __CEPH_OSD_OP(WR, LOCK, 3), "rdlock") \ + f(RDUNLOCK, __CEPH_OSD_OP(WR, LOCK, 4), "rdunlock") \ + f(UPLOCK, __CEPH_OSD_OP(WR, LOCK, 5), "uplock") \ + f(DNLOCK, __CEPH_OSD_OP(WR, LOCK, 6), "dnlock") \ + \ + /** exec **/ \ + /* note: the RD bit here is wrong; see special-case below in helper */ \ + f(CALL, __CEPH_OSD_OP(RD, EXEC, 1), "call") \ + \ + /** pg **/ \ + f(PGLS, __CEPH_OSD_OP(RD, PG, 1), "pgls") \ + f(PGLS_FILTER, __CEPH_OSD_OP(RD, PG, 2), "pgls-filter") \ + f(PG_HITSET_LS, __CEPH_OSD_OP(RD, PG, 3), "pg-hitset-ls") \ + f(PG_HITSET_GET, __CEPH_OSD_OP(RD, PG, 4), "pg-hitset-get") + enum { - /** data **/ - /* read */ - CEPH_OSD_OP_READ = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 1, - CEPH_OSD_OP_STAT = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 2, - CEPH_OSD_OP_MAPEXT = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 3, - - /* fancy read */ - CEPH_OSD_OP_MASKTRUNC = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 4, - CEPH_OSD_OP_SPARSE_READ = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 5, - - CEPH_OSD_OP_NOTIFY = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 6, - CEPH_OSD_OP_NOTIFY_ACK = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 7, - - /* versioning */ - CEPH_OSD_OP_ASSERT_VER = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 8, - - /* write */ - CEPH_OSD_OP_WRITE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 1, - CEPH_OSD_OP_WRITEFULL = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 2, - CEPH_OSD_OP_TRUNCATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 3, - CEPH_OSD_OP_ZERO = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 4, - CEPH_OSD_OP_DELETE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA 
| 5, - - /* fancy write */ - CEPH_OSD_OP_APPEND = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 6, - CEPH_OSD_OP_STARTSYNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 7, - CEPH_OSD_OP_SETTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 8, - CEPH_OSD_OP_TRIMTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 9, - - CEPH_OSD_OP_TMAPUP = CEPH_OSD_OP_MODE_RMW | CEPH_OSD_OP_TYPE_DATA | 10, - CEPH_OSD_OP_TMAPPUT = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 11, - CEPH_OSD_OP_TMAPGET = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 12, - - CEPH_OSD_OP_CREATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 13, - CEPH_OSD_OP_ROLLBACK= CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 14, - - CEPH_OSD_OP_WATCH = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 15, - - /* omap */ - CEPH_OSD_OP_OMAPGETKEYS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 17, - CEPH_OSD_OP_OMAPGETVALS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 18, - CEPH_OSD_OP_OMAPGETHEADER = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 19, - CEPH_OSD_OP_OMAPGETVALSBYKEYS = - CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 20, - CEPH_OSD_OP_OMAPSETVALS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 21, - CEPH_OSD_OP_OMAPSETHEADER = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 22, - CEPH_OSD_OP_OMAPCLEAR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 23, - CEPH_OSD_OP_OMAPRMKEYS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 24, - CEPH_OSD_OP_OMAP_CMP = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 25, - - /* hints */ - CEPH_OSD_OP_SETALLOCHINT = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 35, - - /** multi **/ - CEPH_OSD_OP_CLONERANGE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_MULTI | 1, - CEPH_OSD_OP_ASSERT_SRC_VERSION = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_MULTI | 2, - CEPH_OSD_OP_SRC_CMPXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_MULTI | 3, - - /** attrs **/ - /* read */ - CEPH_OSD_OP_GETXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 1, - CEPH_OSD_OP_GETXATTRS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 2, - CEPH_OSD_OP_CMPXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 3, - - /* write */ - CEPH_OSD_OP_SETXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 1, - CEPH_OSD_OP_SETXATTRS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 2, - CEPH_OSD_OP_RESETXATTRS = CEPH_OSD_OP_MODE_WR|CEPH_OSD_OP_TYPE_ATTR | 3, - CEPH_OSD_OP_RMXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 4, - - /** subop **/ - CEPH_OSD_OP_PULL = CEPH_OSD_OP_MODE_SUB | 1, - CEPH_OSD_OP_PUSH = CEPH_OSD_OP_MODE_SUB | 2, - CEPH_OSD_OP_BALANCEREADS = CEPH_OSD_OP_MODE_SUB | 3, - CEPH_OSD_OP_UNBALANCEREADS = CEPH_OSD_OP_MODE_SUB | 4, - CEPH_OSD_OP_SCRUB = CEPH_OSD_OP_MODE_SUB | 5, - CEPH_OSD_OP_SCRUB_RESERVE = CEPH_OSD_OP_MODE_SUB | 6, - CEPH_OSD_OP_SCRUB_UNRESERVE = CEPH_OSD_OP_MODE_SUB | 7, - CEPH_OSD_OP_SCRUB_STOP = CEPH_OSD_OP_MODE_SUB | 8, - CEPH_OSD_OP_SCRUB_MAP = CEPH_OSD_OP_MODE_SUB | 9, - - /** lock **/ - CEPH_OSD_OP_WRLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 1, - CEPH_OSD_OP_WRUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 2, - CEPH_OSD_OP_RDLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 3, - CEPH_OSD_OP_RDUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 4, - CEPH_OSD_OP_UPLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 5, - CEPH_OSD_OP_DNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 6, - - /** exec **/ - /* note: the RD bit here is wrong; see special-case below in helper */ - CEPH_OSD_OP_CALL = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_EXEC | 1, - - /** pg **/ - CEPH_OSD_OP_PGLS = 
CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_PG | 1, - CEPH_OSD_OP_PGLS_FILTER = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_PG | 2, +#define GENERATE_ENUM_ENTRY(op, opcode, str) CEPH_OSD_OP_##op = (opcode), +__CEPH_FORALL_OSD_OPS(GENERATE_ENUM_ENTRY) +#undef GENERATE_ENUM_ENTRY }; static inline int ceph_osd_op_type_lock(int op) diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index b5223c570eba..1d5196889048 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -27,7 +27,6 @@ struct cgroup_root; struct cgroup_subsys; -struct inode; struct cgroup; extern int cgroup_init_early(void); @@ -38,7 +37,8 @@ extern void cgroup_exit(struct task_struct *p); extern int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry); -extern int proc_cgroup_show(struct seq_file *, void *); +extern int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *tsk); /* define the enumeration of all cgroup subsystems */ #define SUBSYS(_x) _x ## _cgrp_id, @@ -161,11 +161,6 @@ static inline void css_put(struct cgroup_subsys_state *css) /* bits in struct cgroup flags field */ enum { - /* - * Control Group has previously had a child cgroup or a task, - * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) - */ - CGRP_RELEASABLE, /* Control Group requires release notifications to userspace */ CGRP_NOTIFY_ON_RELEASE, /* @@ -235,13 +230,6 @@ struct cgroup { struct list_head e_csets[CGROUP_SUBSYS_COUNT]; /* - * Linked list running through all cgroups that can - * potentially be reaped by the release agent. Protected by - * release_list_lock - */ - struct list_head release_list; - - /* * list of pidlists, up to two for each namespace (one for procs, one * for tasks); created on demand. */ @@ -250,6 +238,9 @@ struct cgroup { /* used to wait for offlining of csses */ wait_queue_head_t offline_waitq; + + /* used to schedule release agent */ + struct work_struct release_agent_work; }; #define MAX_CGROUP_ROOT_NAMELEN 64 @@ -536,13 +527,10 @@ static inline bool cgroup_has_tasks(struct cgroup *cgrp) return !list_empty(&cgrp->cset_links); } -/* returns ino associated with a cgroup, 0 indicates unmounted root */ +/* returns ino associated with a cgroup */ static inline ino_t cgroup_ino(struct cgroup *cgrp) { - if (cgrp->kn) - return cgrp->kn->ino; - else - return 0; + return cgrp->kn->ino; } /* cft/css accessors for cftype->write() operation */ diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h index efbf70b9fd84..0ca5f6046920 100644 --- a/include/linux/clk-private.h +++ b/include/linux/clk-private.h @@ -46,8 +46,10 @@ struct clk { unsigned int enable_count; unsigned int prepare_count; unsigned long accuracy; + int phase; struct hlist_head children; struct hlist_node child_node; + struct hlist_node debug_node; unsigned int notifier_count; #ifdef CONFIG_DEBUG_FS struct dentry *dentry; diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 411dd7eb2653..be21af149f11 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -13,6 +13,7 @@ #include <linux/clk.h> #include <linux/io.h> +#include <linux/of.h> #ifdef CONFIG_COMMON_CLK @@ -129,6 +130,14 @@ struct dentry; * set then clock accuracy will be initialized to parent accuracy * or 0 (perfect clock) if clock has no parent. * + * @get_phase: Queries the hardware to get the current phase of a clock. + * Returned values are 0-359 degrees on success, negative + * error codes on failure. 
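Illustration of the new __CEPH_FORALL_OSD_OPS() X-macro from the ceph/rados.h hunk above — the same list that generates the enum can expand to an opcode-to-name lookup (example_osd_op_name is a made-up name, not necessarily the in-tree helper):

#include <linux/ceph/rados.h>

static const char *example_osd_op_name(int op)
{
	switch (op) {
#define GENERATE_CASE_ENTRY(op_name, opcode, str) \
	case CEPH_OSD_OP_##op_name: return (str);
	__CEPH_FORALL_OSD_OPS(GENERATE_CASE_ENTRY)
#undef GENERATE_CASE_ENTRY
	default:
		return "???";
	}
}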
+ * + * @set_phase: Shift the phase this clock signal in degrees specified + * by the second argument. Valid values for degrees are + * 0-359. Return 0 on success, otherwise -EERROR. + * * @init: Perform platform-specific initialization magic. * This is not not used by any of the basic clock types. * Please consider other ways of solving initialization problems @@ -177,6 +186,8 @@ struct clk_ops { unsigned long parent_rate, u8 index); unsigned long (*recalc_accuracy)(struct clk_hw *hw, unsigned long parent_accuracy); + int (*get_phase)(struct clk_hw *hw); + int (*set_phase)(struct clk_hw *hw, int degrees); void (*init)(struct clk_hw *hw); int (*debug_init)(struct clk_hw *hw, struct dentry *dentry); }; @@ -488,6 +499,28 @@ struct clk *clk_register_composite(struct device *dev, const char *name, struct clk_hw *gate_hw, const struct clk_ops *gate_ops, unsigned long flags); +/*** + * struct clk_gpio_gate - gpio gated clock + * + * @hw: handle between common and hardware-specific interfaces + * @gpiod: gpio descriptor + * + * Clock with a gpio control for enabling and disabling the parent clock. + * Implements .enable, .disable and .is_enabled + */ + +struct clk_gpio { + struct clk_hw hw; + struct gpio_desc *gpiod; +}; + +extern const struct clk_ops clk_gpio_gate_ops; +struct clk *clk_register_gpio_gate(struct device *dev, const char *name, + const char *parent_name, struct gpio_desc *gpio, + unsigned long flags); + +void of_gpio_clk_gate_setup(struct device_node *node); + /** * clk_register - allocate a new clock, register it and return an opaque cookie * @dev: device that is registering this clock diff --git a/include/linux/clk.h b/include/linux/clk.h index afb44bfaf8d1..c7f258a81761 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -106,6 +106,25 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb); */ long clk_get_accuracy(struct clk *clk); +/** + * clk_set_phase - adjust the phase shift of a clock signal + * @clk: clock signal source + * @degrees: number of degrees the signal is shifted + * + * Shifts the phase of a clock signal by the specified degrees. Returns 0 on + * success, -EERROR otherwise. + */ +int clk_set_phase(struct clk *clk, int degrees); + +/** + * clk_get_phase - return the phase shift of a clock signal + * @clk: clock signal source + * + * Returns the phase shift of a clock node in degrees, otherwise returns + * -EERROR. 
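Consumer-side sketch of the phase API added to clk.h above (example_setup_phase and the 90-degree shift are illustrative):

#include <linux/clk.h>

static int example_setup_phase(struct clk *clk)
{
	int ret;

	ret = clk_set_phase(clk, 90);	/* shift the signal by 90 degrees */
	if (ret)
		return ret;

	ret = clk_get_phase(clk);	/* 0-359, or a negative error code */
	return ret < 0 ? ret : 0;
}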
+ */ +int clk_get_phase(struct clk *clk); + #else static inline long clk_get_accuracy(struct clk *clk) @@ -113,6 +132,16 @@ static inline long clk_get_accuracy(struct clk *clk) return -ENOTSUPP; } +static inline long clk_set_phase(struct clk *clk, int phase) +{ + return -ENOTSUPP; +} + +static inline long clk_get_phase(struct clk *clk) +{ + return -ENOTSUPP; +} + #endif /** diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index e8d8a35034a5..f75acbf70e96 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -292,6 +292,7 @@ void omap2xxx_clkt_vps_init(void); void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index); void ti_dt_clocks_register(struct ti_dt_clk *oclks); void ti_dt_clk_init_provider(struct device_node *np, int index); +void ti_dt_clk_init_retry_clks(void); void ti_dt_clockdomains_setup(void); int ti_clk_retry_init(struct device_node *node, struct clk_hw *hw, ti_of_clk_init_cb_t func); diff --git a/include/linux/cma.h b/include/linux/cma.h index 371b93042520..0430ed05d3b9 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -22,6 +22,9 @@ extern int __init cma_declare_contiguous(phys_addr_t size, phys_addr_t base, phys_addr_t limit, phys_addr_t alignment, unsigned int order_per_bit, bool fixed, struct cma **res_cma); +extern int cma_init_reserved_mem(phys_addr_t size, + phys_addr_t base, int order_per_bit, + struct cma **res_cma); extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align); extern bool cma_release(struct cma *cma, struct page *pages, int count); #endif diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 01e3132820da..60bdf8dc02a3 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -2,14 +2,24 @@ #define _LINUX_COMPACTION_H /* Return values for compact_zone() and try_to_compact_pages() */ +/* compaction didn't start as it was deferred due to past failures */ +#define COMPACT_DEFERRED 0 /* compaction didn't start as it was not possible or direct reclaim was more suitable */ -#define COMPACT_SKIPPED 0 +#define COMPACT_SKIPPED 1 /* compaction should continue to another pageblock */ -#define COMPACT_CONTINUE 1 +#define COMPACT_CONTINUE 2 /* direct compaction partially compacted a zone and there are suitable pages */ -#define COMPACT_PARTIAL 2 +#define COMPACT_PARTIAL 3 /* The full zone was compacted */ -#define COMPACT_COMPLETE 3 +#define COMPACT_COMPLETE 4 + +/* Used to signal whether compaction detected need_sched() or lock contention */ +/* No contention detected */ +#define COMPACT_CONTENDED_NONE 0 +/* Either need_sched() was true or fatal signal pending */ +#define COMPACT_CONTENDED_SCHED 1 +/* Zone lock or lru_lock was contended in async compaction */ +#define COMPACT_CONTENDED_LOCK 2 #ifdef CONFIG_COMPACTION extern int sysctl_compact_memory; @@ -22,7 +32,8 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write, extern int fragmentation_index(struct zone *zone, unsigned int order); extern unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask, - enum migrate_mode mode, bool *contended); + enum migrate_mode mode, int *contended, + struct zone **candidate_zone); extern void compact_pgdat(pg_data_t *pgdat, int order); extern void reset_isolation_suitable(pg_data_t *pgdat); extern unsigned long compaction_suitable(struct zone *zone, int order); @@ -91,7 +102,8 @@ static inline bool compaction_restarting(struct zone *zone, int order) #else static inline unsigned long 
try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, - enum migrate_mode mode, bool *contended) + enum migrate_mode mode, int *contended, + struct zone **candidate_zone) { return COMPACT_CONTINUE; } diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h new file mode 100644 index 000000000000..cdd1cc202d51 --- /dev/null +++ b/include/linux/compiler-gcc5.h @@ -0,0 +1,66 @@ +#ifndef __LINUX_COMPILER_H +#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead." +#endif + +#define __used __attribute__((__used__)) +#define __must_check __attribute__((warn_unused_result)) +#define __compiler_offsetof(a, b) __builtin_offsetof(a, b) + +/* Mark functions as cold. gcc will assume any path leading to a call + to them will be unlikely. This means a lot of manual unlikely()s + are unnecessary now for any paths leading to the usual suspects + like BUG(), printk(), panic() etc. [but let's keep them for now for + older compilers] + + Early snapshots of gcc 4.3 don't support this and we can't detect this + in the preprocessor, but we can live with this because they're unreleased. + Maketime probing would be overkill here. + + gcc also has a __attribute__((__hot__)) to move hot functions into + a special section, but I don't see any sense in this right now in + the kernel context */ +#define __cold __attribute__((__cold__)) + +#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) + +#ifndef __CHECKER__ +# define __compiletime_warning(message) __attribute__((warning(message))) +# define __compiletime_error(message) __attribute__((error(message))) +#endif /* __CHECKER__ */ + +/* + * Mark a position in code as unreachable. This can be used to + * suppress control flow warnings after asm blocks that transfer + * control elsewhere. + * + * Early snapshots of gcc 4.5 don't support this and we can't detect + * this in the preprocessor, but we can live with this because they're + * unreleased. Really, we need to have autoconf for the kernel. + */ +#define unreachable() __builtin_unreachable() + +/* Mark a function definition as prohibited from being cloned. */ +#define __noclone __attribute__((__noclone__)) + +/* + * Tell the optimizer that something else uses this function or variable. + */ +#define __visible __attribute__((externally_visible)) + +/* + * GCC 'asm goto' miscompiles certain code sequences: + * + * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 + * + * Work it around via a compiler barrier quirk suggested by Jakub Jelinek. + * Fixed in GCC 4.8.2 and later versions. + * + * (asm goto is automatically volatile - the naming reflects this.) + */ +#define asm_volatile_goto(x...) 
do { asm goto(x); asm (""); } while (0) + +#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP +#define __HAVE_BUILTIN_BSWAP32__ +#define __HAVE_BUILTIN_BSWAP64__ +#define __HAVE_BUILTIN_BSWAP16__ +#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 95978ad7fcdd..b2d9a43012b2 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -213,6 +213,7 @@ extern struct bus_type cpu_subsys; extern void cpu_hotplug_begin(void); extern void cpu_hotplug_done(void); extern void get_online_cpus(void); +extern bool try_get_online_cpus(void); extern void put_online_cpus(void); extern void cpu_hotplug_disable(void); extern void cpu_hotplug_enable(void); @@ -230,6 +231,7 @@ int cpu_down(unsigned int cpu); static inline void cpu_hotplug_begin(void) {} static inline void cpu_hotplug_done(void) {} #define get_online_cpus() do { } while (0) +#define try_get_online_cpus() true #define put_online_cpus() do { } while (0) #define cpu_hotplug_disable() do { } while (0) #define cpu_hotplug_enable() do { } while (0) diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 7d1955afa62c..138336b6bb04 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -112,6 +112,9 @@ struct cpufreq_policy { spinlock_t transition_lock; wait_queue_head_t transition_wait; struct task_struct *transition_task; /* Task which is doing the transition */ + + /* For cpufreq driver's internal use */ + void *driver_data; }; /* Only for ACPI */ diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 2997af6d2ccd..0a9a6da21e74 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -666,10 +666,19 @@ static inline size_t cpumask_size(void) * * This code makes NR_CPUS length memcopy and brings to a memory corruption. * cpumask_copy() provide safe copy functionality. + * + * Note that there is another evil here: If you define a cpumask_var_t + * as a percpu variable then the way to obtain the address of the cpumask + * structure differently influences what this_cpu_* operation needs to be + * used. Please use this_cpu_cpumask_var_t in those cases. The direct use + * of this_cpu_ptr() or this_cpu_read() will lead to failures when the + * other type of cpumask_var_t implementation is configured. 
*/ #ifdef CONFIG_CPUMASK_OFFSTACK typedef struct cpumask *cpumask_var_t; +#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x) + bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); @@ -681,6 +690,8 @@ void free_bootmem_cpumask_var(cpumask_var_t mask); #else typedef struct cpumask cpumask_var_t[1]; +#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x) + static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) { return true; diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 6e39c9bb0dae..2f073db7392e 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -86,7 +86,8 @@ extern void __cpuset_memory_pressure_bump(void); extern void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task); -extern int proc_cpuset_show(struct seq_file *, void *); +extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *tsk); extern int cpuset_mem_spread_node(void); extern int cpuset_slab_spread_node(void); diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h index b3cb71f0d3b0..cf53d0773ce3 100644 --- a/include/linux/crc-t10dif.h +++ b/include/linux/crc-t10dif.h @@ -6,7 +6,8 @@ #define CRC_T10DIF_DIGEST_SIZE 2 #define CRC_T10DIF_BLOCK_SIZE 1 -__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len); -__u16 crc_t10dif(unsigned char const *, size_t); +extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, + size_t len); +extern __u16 crc_t10dif(unsigned char const *, size_t); #endif diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 75a227cc7ce2..b2a2a08523bf 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -11,7 +11,6 @@ #include <linux/rcupdate.h> #include <linux/lockref.h> -struct nameidata; struct path; struct vfsmount; @@ -226,11 +225,6 @@ struct dentry_operations { extern seqlock_t rename_lock; -static inline int dname_external(const struct dentry *dentry) -{ - return dentry->d_name.name != dentry->d_iname; -} - /* * These are the low-level FS interfaces to the dcache.. */ @@ -254,7 +248,7 @@ extern struct dentry * d_obtain_root(struct inode *); extern void shrink_dcache_sb(struct super_block *); extern void shrink_dcache_parent(struct dentry *); extern void shrink_dcache_for_umount(struct super_block *); -extern int d_invalidate(struct dentry *); +extern void d_invalidate(struct dentry *); /* only used at mount-time */ extern struct dentry * d_make_root(struct inode *); @@ -269,7 +263,6 @@ extern void d_prune_aliases(struct inode *); /* test whether we have any submounts in a subdir tree */ extern int have_submounts(struct dentry *); -extern int check_submounts_and_drop(struct dentry *); /* * This adds the entry to the hash queues. diff --git a/include/linux/device.h b/include/linux/device.h index a608e237f0a8..ce1f21608b16 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -181,13 +181,14 @@ extern int bus_unregister_notifier(struct bus_type *bus, * with the device lock held in the core, so be careful. 
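Editor's aside on the cpumask.h hunk above: the new this_cpu_cpumask_var_ptr() hides the difference between the offstack (pointer) and onstack (array) layouts of cpumask_var_t when it is used as a per-cpu variable. A minimal sketch, assuming a hypothetical per-cpu scratch mask; with CONFIG_CPUMASK_OFFSTACK each per-cpu entry would still need to be allocated up front (e.g. with zalloc_cpumask_var()):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/smp.h>

/* Hypothetical per-cpu scratch mask; the name is illustrative only. */
static DEFINE_PER_CPU(cpumask_var_t, scratch_mask);

static void example_mark_current_cpu(void)
{
	int cpu = get_cpu();	/* keep preemption off around the per-cpu access */
	struct cpumask *mask = this_cpu_cpumask_var_ptr(scratch_mask);

	cpumask_set_cpu(cpu, mask);
	put_cpu();
}
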
*/ #define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */ -#define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device removed */ -#define BUS_NOTIFY_BIND_DRIVER 0x00000003 /* driver about to be +#define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */ +#define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */ +#define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be bound */ -#define BUS_NOTIFY_BOUND_DRIVER 0x00000004 /* driver bound to device */ -#define BUS_NOTIFY_UNBIND_DRIVER 0x00000005 /* driver about to be +#define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */ +#define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be unbound */ -#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000006 /* driver is unbound +#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound from the device */ extern struct kset *bus_get_kset(struct bus_type *bus); diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 931b70986272..d5d388160f42 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -263,6 +263,32 @@ struct dma_attrs; #define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \ dma_unmap_sg(dev, sgl, nents, dir) +#else +static inline void *dma_alloc_writecombine(struct device *dev, size_t size, + dma_addr_t *dma_addr, gfp_t gfp) +{ + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs); +} + +static inline void dma_free_writecombine(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_addr) +{ + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs); +} + +static inline int dma_mmap_writecombine(struct device *dev, + struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, + size_t size) +{ + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs); +} #endif /* CONFIG_HAVE_DMA_ATTRS */ #ifdef CONFIG_NEED_DMA_MAP_STATE diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h new file mode 100644 index 000000000000..71456442ebe3 --- /dev/null +++ b/include/linux/dma/dw.h @@ -0,0 +1,64 @@ +/* + * Driver for the Synopsys DesignWare DMA Controller + * + * Copyright (C) 2007 Atmel Corporation + * Copyright (C) 2010-2011 ST Microelectronics + * Copyright (C) 2014 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
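Editor's aside on the dma-mapping.h hunk above: with CONFIG_HAVE_DMA_ATTRS these writecombine wrappers now expand to the dma_*_attrs() calls with DMA_ATTR_WRITE_COMBINE set. A small sketch of a driver using them; the helper names and the GFP flag choice are made up:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative driver helpers; names are hypothetical. */
static void *example_alloc_wc(struct device *dev, size_t size, dma_addr_t *dma)
{
	/* Expands to dma_alloc_attrs() with DMA_ATTR_WRITE_COMBINE, as above. */
	return dma_alloc_writecombine(dev, size, dma, GFP_KERNEL);
}

static void example_free_wc(struct device *dev, size_t size,
			    void *cpu_addr, dma_addr_t dma)
{
	dma_free_writecombine(dev, size, cpu_addr, dma);
}
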
+ */ +#ifndef _DMA_DW_H +#define _DMA_DW_H + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/dmaengine.h> + +#include <linux/platform_data/dma-dw.h> + +struct dw_dma; + +/** + * struct dw_dma_chip - representation of DesignWare DMA controller hardware + * @dev: struct device of the DMA controller + * @irq: irq line + * @regs: memory mapped I/O space + * @clk: hclk clock + * @dw: struct dw_dma that is filed by dw_dma_probe() + */ +struct dw_dma_chip { + struct device *dev; + int irq; + void __iomem *regs; + struct clk *clk; + struct dw_dma *dw; +}; + +/* Export to the platform drivers */ +int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata); +int dw_dma_remove(struct dw_dma_chip *chip); + +/* DMA API extensions */ +struct dw_desc; + +struct dw_cyclic_desc { + struct dw_desc **desc; + unsigned long periods; + void (*period_callback)(void *param); + void *period_callback_param; +}; + +struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, + dma_addr_t buf_addr, size_t buf_len, size_t period_len, + enum dma_transfer_direction direction); +void dw_dma_cyclic_free(struct dma_chan *chan); +int dw_dma_cyclic_start(struct dma_chan *chan); +void dw_dma_cyclic_stop(struct dma_chan *chan); + +dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan); + +dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan); + +#endif /* _DMA_DW_H */ diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 212c5b9ac106..653a1fd07ae8 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -199,15 +199,12 @@ enum dma_ctrl_flags { * configuration data in statically from the platform). An additional * argument of struct dma_slave_config must be passed in with this * command. - * @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller - * into external start mode. */ enum dma_ctrl_cmd { DMA_TERMINATE_ALL, DMA_PAUSE, DMA_RESUME, DMA_SLAVE_CONFIG, - FSLDMA_EXTERNAL_START, }; /** @@ -307,7 +304,9 @@ enum dma_slave_buswidth { * struct dma_slave_config - dma slave channel runtime config * @direction: whether the data shall go in or out on this slave * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are - * legal values. + * legal values. DEPRECATED, drivers should use the direction argument + * to the device_prep_slave_sg and device_prep_dma_cyclic functions or + * the dir field in the dma_interleaved_template structure. * @src_addr: this is the physical address where DMA slave data * should be read (RX), if the source is memory this argument is * ignored. 
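Editor's aside on the new <linux/dma/dw.h> above: platform glue is expected to fill a struct dw_dma_chip and hand it to dw_dma_probe(). The sketch below is not the in-tree glue driver; error unwinding and clock handling are trimmed, and passing a NULL pdata assumes the controller can autoconfigure itself:

#include <linux/dma/dw.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static int example_dw_probe(struct platform_device *pdev)
{
	struct dw_dma_chip *chip;
	struct resource *mem;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = &pdev->dev;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	platform_set_drvdata(pdev, chip);

	/* NULL pdata relies on the controller's autoconfiguration support. */
	return dw_dma_probe(chip, NULL);
}

static int example_dw_remove(struct platform_device *pdev)
{
	return dw_dma_remove(platform_get_drvdata(pdev));
}
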
@@ -755,6 +754,16 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma( return chan->device->device_prep_interleaved_dma(chan, xt, flags); } +static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( + struct dma_chan *chan, + struct scatterlist *dst_sg, unsigned int dst_nents, + struct scatterlist *src_sg, unsigned int src_nents, + unsigned long flags) +{ + return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents, + src_sg, src_nents, flags); +} + static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) { if (!chan || !caps) diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 1deece46a0ca..593fff99e6bf 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -56,13 +56,19 @@ struct dmar_drhd_unit { struct intel_iommu *iommu; }; +struct dmar_pci_path { + u8 bus; + u8 device; + u8 function; +}; + struct dmar_pci_notify_info { struct pci_dev *dev; unsigned long event; int bus; u16 seg; u16 level; - struct acpi_dmar_pci_path path[]; + struct dmar_pci_path path[]; } __attribute__((packed)); extern struct rw_semaphore dmar_global_lock; diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h deleted file mode 100644 index 68b4024184de..000000000000 --- a/include/linux/dw_dmac.h +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Driver for the Synopsys DesignWare DMA Controller - * - * Copyright (C) 2007 Atmel Corporation - * Copyright (C) 2010-2011 ST Microelectronics - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef DW_DMAC_H -#define DW_DMAC_H - -#include <linux/dmaengine.h> - -/** - * struct dw_dma_slave - Controller-specific information about a slave - * - * @dma_dev: required DMA master device. Depricated. - * @bus_id: name of this device channel, not just a device name since - * devices may have more than one channel e.g. "foo_tx" - * @cfg_hi: Platform-specific initializer for the CFG_HI register - * @cfg_lo: Platform-specific initializer for the CFG_LO register - * @src_master: src master for transfers on allocated channel. - * @dst_master: dest master for transfers on allocated channel. - */ -struct dw_dma_slave { - struct device *dma_dev; - u32 cfg_hi; - u32 cfg_lo; - u8 src_master; - u8 dst_master; -}; - -/** - * struct dw_dma_platform_data - Controller configuration parameters - * @nr_channels: Number of channels supported by hardware (max 8) - * @is_private: The device channels should be marked as private and not for - * by the general purpose DMA channel allocator. - * @chan_allocation_order: Allocate channels starting from 0 or 7 - * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. 
- * @block_size: Maximum block size supported by the controller - * @nr_masters: Number of AHB masters supported by the controller - * @data_width: Maximum data width supported by hardware per AHB master - * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits) - */ -struct dw_dma_platform_data { - unsigned int nr_channels; - bool is_private; -#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ -#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ - unsigned char chan_allocation_order; -#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ -#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ - unsigned char chan_priority; - unsigned short block_size; - unsigned char nr_masters; - unsigned char data_width[4]; -}; - -/* bursts size */ -enum dw_dma_msize { - DW_DMA_MSIZE_1, - DW_DMA_MSIZE_4, - DW_DMA_MSIZE_8, - DW_DMA_MSIZE_16, - DW_DMA_MSIZE_32, - DW_DMA_MSIZE_64, - DW_DMA_MSIZE_128, - DW_DMA_MSIZE_256, -}; - -/* Platform-configurable bits in CFG_HI */ -#define DWC_CFGH_FCMODE (1 << 0) -#define DWC_CFGH_FIFO_MODE (1 << 1) -#define DWC_CFGH_PROTCTL(x) ((x) << 2) -#define DWC_CFGH_SRC_PER(x) ((x) << 7) -#define DWC_CFGH_DST_PER(x) ((x) << 11) - -/* Platform-configurable bits in CFG_LO */ -#define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */ -#define DWC_CFGL_LOCK_CH_BLOCK (1 << 12) -#define DWC_CFGL_LOCK_CH_XACT (2 << 12) -#define DWC_CFGL_LOCK_BUS_XFER (0 << 14) /* scope of LOCK_BUS */ -#define DWC_CFGL_LOCK_BUS_BLOCK (1 << 14) -#define DWC_CFGL_LOCK_BUS_XACT (2 << 14) -#define DWC_CFGL_LOCK_CH (1 << 15) /* channel lockout */ -#define DWC_CFGL_LOCK_BUS (1 << 16) /* busmaster lockout */ -#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ -#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ - -/* DMA API extensions */ -struct dw_cyclic_desc { - struct dw_desc **desc; - unsigned long periods; - void (*period_callback)(void *param); - void *period_callback_param; -}; - -struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, - dma_addr_t buf_addr, size_t buf_len, size_t period_len, - enum dma_transfer_direction direction); -void dw_dma_cyclic_free(struct dma_chan *chan); -int dw_dma_cyclic_start(struct dma_chan *chan); -void dw_dma_cyclic_stop(struct dma_chan *chan); - -dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan); - -dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan); - -#endif /* DW_DMAC_H */ diff --git a/include/linux/flex_proportions.h b/include/linux/flex_proportions.h index 4ebc49fae391..0d348e011a6e 100644 --- a/include/linux/flex_proportions.h +++ b/include/linux/flex_proportions.h @@ -10,6 +10,7 @@ #include <linux/percpu_counter.h> #include <linux/spinlock.h> #include <linux/seqlock.h> +#include <linux/gfp.h> /* * When maximum proportion of some event type is specified, this is the @@ -32,7 +33,7 @@ struct fprop_global { seqcount_t sequence; }; -int fprop_global_init(struct fprop_global *p); +int fprop_global_init(struct fprop_global *p, gfp_t gfp); void fprop_global_destroy(struct fprop_global *p); bool fprop_new_period(struct fprop_global *p, int periods); @@ -79,7 +80,7 @@ struct fprop_local_percpu { raw_spinlock_t lock; /* Protect period and numerator */ }; -int fprop_local_init_percpu(struct fprop_local_percpu *pl); +int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp); void fprop_local_destroy_percpu(struct fprop_local_percpu *pl); void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl); void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl, diff 
--git a/include/linux/font.h b/include/linux/font.h index 40a24ab41b36..d6821769dd1e 100644 --- a/include/linux/font.h +++ b/include/linux/font.h @@ -31,6 +31,7 @@ struct font_desc { #define SUN12x22_IDX 7 #define ACORN8x8_IDX 8 #define MINI4x6_IDX 9 +#define FONT6x10_IDX 10 extern const struct font_desc font_vga_8x8, font_vga_8x16, @@ -41,7 +42,8 @@ extern const struct font_desc font_vga_8x8, font_sun_8x16, font_sun_12x22, font_acorn_8x8, - font_mini_4x6; + font_mini_4x6, + font_6x10; /* Find a font with a specific name */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 94187721ad41..a957d4366c24 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -192,8 +192,6 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, #define READ 0 #define WRITE RW_MASK #define READA RWA_MASK -#define KERNEL_READ (READ|REQ_KERNEL) -#define KERNEL_WRITE (WRITE|REQ_KERNEL) #define READ_SYNC (READ | REQ_SYNC) #define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE) @@ -851,13 +849,7 @@ static inline struct file *get_file(struct file *f) */ #define FILE_LOCK_DEFERRED 1 -/* - * The POSIX file lock owner is determined by - * the "struct files_struct" in the thread group - * (or NULL for no owner - BSD locks). - * - * Lockd stuffs a "host" pointer into this. - */ +/* legacy typedef, should eventually be removed */ typedef void *fl_owner_t; struct file_lock_operations { @@ -868,10 +860,13 @@ struct file_lock_operations { struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock *, struct file_lock *); unsigned long (*lm_owner_key)(struct file_lock *); + void (*lm_get_owner)(struct file_lock *, struct file_lock *); + void (*lm_put_owner)(struct file_lock *); void (*lm_notify)(struct file_lock *); /* unblock callback */ - int (*lm_grant)(struct file_lock *, struct file_lock *, int); - void (*lm_break)(struct file_lock *); - int (*lm_change)(struct file_lock **, int); + int (*lm_grant)(struct file_lock *, int); + bool (*lm_break)(struct file_lock *); + int (*lm_change)(struct file_lock **, int, struct list_head *); + void (*lm_setup)(struct file_lock *, void **); }; struct lock_manager { @@ -966,7 +961,7 @@ void locks_free_lock(struct file_lock *fl); extern void locks_init_lock(struct file_lock *); extern struct file_lock * locks_alloc_lock(void); extern void locks_copy_lock(struct file_lock *, struct file_lock *); -extern void __locks_copy_lock(struct file_lock *, const struct file_lock *); +extern void locks_copy_conflock(struct file_lock *, struct file_lock *); extern void locks_remove_posix(struct file *, fl_owner_t); extern void locks_remove_file(struct file *); extern void locks_release_private(struct file_lock *); @@ -980,11 +975,9 @@ extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl); extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type); extern void lease_get_mtime(struct inode *, struct timespec *time); -extern int generic_setlease(struct file *, long, struct file_lock **); -extern int vfs_setlease(struct file *, long, struct file_lock **); -extern int lease_modify(struct file_lock **, int); -extern int lock_may_read(struct inode *, loff_t start, unsigned long count); -extern int lock_may_write(struct inode *, loff_t start, unsigned long count); +extern int generic_setlease(struct file *, long, struct file_lock **, void **priv); +extern int vfs_setlease(struct file *, long, struct file_lock **, void **); +extern int lease_modify(struct file_lock **, 
int, struct list_head *); #else /* !CONFIG_FILE_LOCKING */ static inline int fcntl_getlk(struct file *file, unsigned int cmd, struct flock __user *user) @@ -1013,12 +1006,12 @@ static inline int fcntl_setlk64(unsigned int fd, struct file *file, #endif static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg) { - return 0; + return -EINVAL; } static inline int fcntl_getlease(struct file *filp) { - return 0; + return F_UNLCK; } static inline void locks_init_lock(struct file_lock *fl) @@ -1026,7 +1019,7 @@ static inline void locks_init_lock(struct file_lock *fl) return; } -static inline void __locks_copy_lock(struct file_lock *new, struct file_lock *fl) +static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl) { return; } @@ -1100,33 +1093,22 @@ static inline void lease_get_mtime(struct inode *inode, struct timespec *time) } static inline int generic_setlease(struct file *filp, long arg, - struct file_lock **flp) + struct file_lock **flp, void **priv) { return -EINVAL; } static inline int vfs_setlease(struct file *filp, long arg, - struct file_lock **lease) + struct file_lock **lease, void **priv) { return -EINVAL; } -static inline int lease_modify(struct file_lock **before, int arg) +static inline int lease_modify(struct file_lock **before, int arg, + struct list_head *dispose) { return -EINVAL; } - -static inline int lock_may_read(struct inode *inode, loff_t start, - unsigned long len) -{ - return 1; -} - -static inline int lock_may_write(struct inode *inode, loff_t start, - unsigned long len) -{ - return 1; -} #endif /* !CONFIG_FILE_LOCKING */ @@ -1151,8 +1133,8 @@ extern void fasync_free(struct fasync_struct *); /* can be called from interrupts */ extern void kill_fasync(struct fasync_struct **, int, int); -extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force); -extern int f_setown(struct file *filp, unsigned long arg, int force); +extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force); +extern void f_setown(struct file *filp, unsigned long arg, int force); extern void f_delown(struct file *filp); extern pid_t f_getown(struct file *filp); extern int send_sigurg(struct fown_struct *fown); @@ -1506,7 +1488,7 @@ struct file_operations { int (*flock) (struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); - int (*setlease)(struct file *, long, struct file_lock **); + int (*setlease)(struct file *, long, struct file_lock **, void **); long (*fallocate)(struct file *file, int mode, loff_t offset, loff_t len); int (*show_fdinfo)(struct seq_file *m, struct file *f); @@ -1855,7 +1837,8 @@ extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data); extern void kern_unmount(struct vfsmount *mnt); extern int may_umount_tree(struct vfsmount *); extern int may_umount(struct vfsmount *); -extern long do_mount(const char *, const char *, const char *, unsigned long, void *); +extern long do_mount(const char *, const char __user *, + const char *, unsigned long, void *); extern struct vfsmount *collect_mounts(struct path *); extern void drop_collected_mounts(struct vfsmount *); extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *, @@ -1874,7 +1857,7 @@ extern int current_umask(void); extern void ihold(struct inode * inode); extern void iput(struct inode *); -static inline struct inode 
*file_inode(struct file *f) +static inline struct inode *file_inode(const struct file *f) { return f->f_inode; } @@ -2611,6 +2594,7 @@ extern int simple_write_end(struct file *file, struct address_space *mapping, struct page *page, void *fsdata); extern int always_delete_dentry(const struct dentry *); extern struct inode *alloc_anon_inode(struct super_block *); +extern int simple_nosetlease(struct file *, long, struct file_lock **, void **); extern const struct dentry_operations simple_dentry_operations; extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags); diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h index f49ddb1b2273..84d60cb841b1 100644 --- a/include/linux/fsl_ifc.h +++ b/include/linux/fsl_ifc.h @@ -781,13 +781,13 @@ struct fsl_ifc_regs { __be32 amask; u32 res4[0x2]; } amask_cs[FSL_IFC_BANK_COUNT]; - u32 res5[0x17]; + u32 res5[0x18]; struct { - __be32 csor_ext; __be32 csor; + __be32 csor_ext; u32 res6; } csor_cs[FSL_IFC_BANK_COUNT]; - u32 res7[0x19]; + u32 res7[0x18]; struct { __be32 ftim[4]; u32 res8[0x8]; diff --git a/include/linux/fsldma.h b/include/linux/fsldma.h new file mode 100644 index 000000000000..b213c02963c9 --- /dev/null +++ b/include/linux/fsldma.h @@ -0,0 +1,13 @@ +/* + * This is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef FSL_DMA_H +#define FSL_DMA_H +/* fsl dma API for enxternal start */ +int fsl_dma_external_start(struct dma_chan *dchan, int enable); + +#endif diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index f0b0edbf55a9..662697babd48 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -56,6 +56,8 @@ struct ftrace_ops; typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct pt_regs *regs); +ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); + /* * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are * set in the flags member. @@ -89,6 +91,9 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, * INITIALIZED - The ftrace_ops has already been initialized (first use time * register_ftrace_function() is called, it will initialized the ops) * DELETED - The ops are being deleted, do not let them be registered again. + * ADDING - The ops is in the process of being added. + * REMOVING - The ops is in the process of being removed. + * MODIFYING - The ops is in the process of changing its filter functions. 
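Editor's aside on the fs.h lease changes above: setlease handlers now take an extra void **priv argument. A minimal sketch of a filesystem hooking this up (the examplefs_ names are hypothetical); a filesystem that cannot support leases at all can instead point .setlease at the new simple_nosetlease():

#include <linux/fs.h>

static int examplefs_setlease(struct file *filp, long arg,
			      struct file_lock **flp, void **priv)
{
	/* Delegate to the generic implementation with the new priv cookie. */
	return generic_setlease(filp, arg, flp, priv);
}

static const struct file_operations examplefs_fops = {
	.setlease	= examplefs_setlease,
	/* other methods omitted */
};
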
*/ enum { FTRACE_OPS_FL_ENABLED = 1 << 0, @@ -100,6 +105,9 @@ enum { FTRACE_OPS_FL_STUB = 1 << 6, FTRACE_OPS_FL_INITIALIZED = 1 << 7, FTRACE_OPS_FL_DELETED = 1 << 8, + FTRACE_OPS_FL_ADDING = 1 << 9, + FTRACE_OPS_FL_REMOVING = 1 << 10, + FTRACE_OPS_FL_MODIFYING = 1 << 11, }; #ifdef CONFIG_DYNAMIC_FTRACE @@ -132,7 +140,7 @@ struct ftrace_ops { int nr_trampolines; struct ftrace_ops_hash local_hash; struct ftrace_ops_hash *func_hash; - struct ftrace_hash *tramp_hash; + struct ftrace_ops_hash old_hash; unsigned long trampoline; #endif }; diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 1c2fdaa2ffc3..1ccaab44abcc 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h @@ -110,6 +110,10 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data); +extern unsigned long gen_pool_first_fit_order_align(unsigned long *map, + unsigned long size, unsigned long start, unsigned int nr, + void *data); + extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data); @@ -117,6 +121,9 @@ extern struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order, int nid); extern struct gen_pool *dev_get_gen_pool(struct device *dev); +bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start, + size_t size); + #ifdef CONFIG_OF extern struct gen_pool *of_get_named_gen_pool(struct device_node *np, const char *propname, int index); diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h index c0894dd8827b..667c31101b8b 100644 --- a/include/linux/genl_magic_func.h +++ b/include/linux/genl_magic_func.h @@ -178,12 +178,12 @@ static int s_name ## _from_attrs_for_change(struct s_name *s, \ #define __assign(attr_nr, attr_flag, name, nla_type, type, assignment...) \ nla = ntb[attr_nr]; \ if (nla) { \ - if (exclude_invariants && ((attr_flag) & DRBD_F_INVARIANT)) { \ + if (exclude_invariants && !!((attr_flag) & DRBD_F_INVARIANT)) { \ pr_info("<< must not change invariant attr: %s\n", #name); \ return -EEXIST; \ } \ assignment; \ - } else if (exclude_invariants && ((attr_flag) & DRBD_F_INVARIANT)) { \ + } else if (exclude_invariants && !!((attr_flag) & DRBD_F_INVARIANT)) { \ /* attribute missing from payload, */ \ /* which was expected */ \ } else if ((attr_flag) & DRBD_F_REQUIRED) { \ diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 5e7219dc0fae..41b30fd4d041 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -156,7 +156,7 @@ struct vm_area_struct; #define GFP_DMA32 __GFP_DMA32 /* Convert GFP flags to their corresponding migrate type */ -static inline int allocflags_to_migratetype(gfp_t gfp_flags) +static inline int gfpflags_to_migratetype(const gfp_t gfp_flags) { WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK); diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index c5e41da20112..249db3057e4d 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -56,6 +56,8 @@ struct seq_file; * as the chip access may sleep when e.g. reading out the IRQ status * registers. * @exported: flags if the gpiochip is exported for use from sysfs. Private. 
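Editor's aside on the genalloc.h additions above: gen_pool_first_fit_order_align() has the genpool_algo_t shape, so it can be installed with the existing gen_pool_set_algo(), and addr_in_gen_pool() answers whether a range belongs to a pool. A small sketch with pool creation omitted:

#include <linux/genalloc.h>

static void example_pool_setup(struct gen_pool *pool)
{
	/* Align each allocation to the order of its own size. */
	gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);
}

static bool example_pool_owns(struct gen_pool *pool, unsigned long addr,
			      size_t size)
{
	return addr_in_gen_pool(pool, addr, size);
}
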
+ * @irq_not_threaded: flag must be set if @can_sleep is set but the + * IRQs don't need to be threaded * * A gpio_chip can help platforms abstract various sources of GPIOs so * they can all be accessed through a common programing interface. @@ -101,6 +103,7 @@ struct gpio_chip { struct gpio_desc *desc; const char *const *names; bool can_sleep; + bool irq_not_threaded; bool exported; #ifdef CONFIG_GPIOLIB_IRQCHIP @@ -141,7 +144,7 @@ extern const char *gpiochip_is_requested(struct gpio_chip *chip, /* add/remove chips */ extern int gpiochip_add(struct gpio_chip *chip); -extern int gpiochip_remove(struct gpio_chip *chip); +extern void gpiochip_remove(struct gpio_chip *chip); extern struct gpio_chip *gpiochip_find(void *data, int (*match)(struct gpio_chip *chip, void *data)); @@ -166,7 +169,8 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip, #endif /* CONFIG_GPIOLIB_IRQCHIP */ -int gpiochip_request_own_desc(struct gpio_desc *desc, const char *label); +struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum, + const char *label); void gpiochip_free_own_desc(struct gpio_desc *desc); #else /* CONFIG_GPIOLIB */ diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 63579cb8d3dc..ad9051bab267 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -132,7 +132,7 @@ extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, spinlock_t **ptl) { - VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem)); + VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma); if (pmd_trans_huge(*pmd)) return __pmd_trans_huge_lock(pmd, vma, ptl); else diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h new file mode 100644 index 000000000000..d043449a079d --- /dev/null +++ b/include/linux/ieee802154.h @@ -0,0 +1,232 @@ +/* + * IEEE802.15.4-2003 specification + * + * Copyright (C) 2007, 2008 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
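Editor's aside on the gpio/driver.h hunk above: gpiochip_request_own_desc() now takes the chip and a hardware offset and returns a descriptor. A sketch of a gpiochip driver claiming one of its own lines; the offset 3, the label and the error code are made up:

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>

static struct gpio_desc *example_claim_own_line(struct gpio_chip *chip)
{
	struct gpio_desc *desc;

	desc = gpiochip_request_own_desc(chip, 3, "example-internal");
	if (IS_ERR(desc))
		return desc;

	if (gpiod_direction_output(desc, 0)) {
		gpiochip_free_own_desc(desc);
		return ERR_PTR(-EIO);
	}
	return desc;
}
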
+ * + * Written by: + * Pavel Smolenskiy <pavel.smolenskiy@gmail.com> + * Maxim Gorbachyov <maxim.gorbachev@siemens.com> + * Maxim Osipov <maxim.osipov@siemens.com> + * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> + * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> + */ + +#ifndef LINUX_IEEE802154_H +#define LINUX_IEEE802154_H + +#include <linux/types.h> +#include <linux/random.h> +#include <asm/byteorder.h> + +#define IEEE802154_MTU 127 +#define IEEE802154_MIN_PSDU_LEN 5 + +#define IEEE802154_EXTENDED_ADDR_LEN 8 + +#define IEEE802154_FC_TYPE_BEACON 0x0 /* Frame is beacon */ +#define IEEE802154_FC_TYPE_DATA 0x1 /* Frame is data */ +#define IEEE802154_FC_TYPE_ACK 0x2 /* Frame is acknowledgment */ +#define IEEE802154_FC_TYPE_MAC_CMD 0x3 /* Frame is MAC command */ + +#define IEEE802154_FC_TYPE_SHIFT 0 +#define IEEE802154_FC_TYPE_MASK ((1 << 3) - 1) +#define IEEE802154_FC_TYPE(x) ((x & IEEE802154_FC_TYPE_MASK) >> IEEE802154_FC_TYPE_SHIFT) +#define IEEE802154_FC_SET_TYPE(v, x) do { \ + v = (((v) & ~IEEE802154_FC_TYPE_MASK) | \ + (((x) << IEEE802154_FC_TYPE_SHIFT) & IEEE802154_FC_TYPE_MASK)); \ + } while (0) + +#define IEEE802154_FC_SECEN_SHIFT 3 +#define IEEE802154_FC_SECEN (1 << IEEE802154_FC_SECEN_SHIFT) +#define IEEE802154_FC_FRPEND_SHIFT 4 +#define IEEE802154_FC_FRPEND (1 << IEEE802154_FC_FRPEND_SHIFT) +#define IEEE802154_FC_ACK_REQ_SHIFT 5 +#define IEEE802154_FC_ACK_REQ (1 << IEEE802154_FC_ACK_REQ_SHIFT) +#define IEEE802154_FC_INTRA_PAN_SHIFT 6 +#define IEEE802154_FC_INTRA_PAN (1 << IEEE802154_FC_INTRA_PAN_SHIFT) + +#define IEEE802154_FC_SAMODE_SHIFT 14 +#define IEEE802154_FC_SAMODE_MASK (3 << IEEE802154_FC_SAMODE_SHIFT) +#define IEEE802154_FC_DAMODE_SHIFT 10 +#define IEEE802154_FC_DAMODE_MASK (3 << IEEE802154_FC_DAMODE_SHIFT) + +#define IEEE802154_FC_VERSION_SHIFT 12 +#define IEEE802154_FC_VERSION_MASK (3 << IEEE802154_FC_VERSION_SHIFT) +#define IEEE802154_FC_VERSION(x) ((x & IEEE802154_FC_VERSION_MASK) >> IEEE802154_FC_VERSION_SHIFT) + +#define IEEE802154_FC_SAMODE(x) \ + (((x) & IEEE802154_FC_SAMODE_MASK) >> IEEE802154_FC_SAMODE_SHIFT) + +#define IEEE802154_FC_DAMODE(x) \ + (((x) & IEEE802154_FC_DAMODE_MASK) >> IEEE802154_FC_DAMODE_SHIFT) + +#define IEEE802154_SCF_SECLEVEL_MASK 7 +#define IEEE802154_SCF_SECLEVEL_SHIFT 0 +#define IEEE802154_SCF_SECLEVEL(x) (x & IEEE802154_SCF_SECLEVEL_MASK) +#define IEEE802154_SCF_KEY_ID_MODE_SHIFT 3 +#define IEEE802154_SCF_KEY_ID_MODE_MASK (3 << IEEE802154_SCF_KEY_ID_MODE_SHIFT) +#define IEEE802154_SCF_KEY_ID_MODE(x) \ + ((x & IEEE802154_SCF_KEY_ID_MODE_MASK) >> IEEE802154_SCF_KEY_ID_MODE_SHIFT) + +#define IEEE802154_SCF_KEY_IMPLICIT 0 +#define IEEE802154_SCF_KEY_INDEX 1 +#define IEEE802154_SCF_KEY_SHORT_INDEX 2 +#define IEEE802154_SCF_KEY_HW_INDEX 3 + +#define IEEE802154_SCF_SECLEVEL_NONE 0 +#define IEEE802154_SCF_SECLEVEL_MIC32 1 +#define IEEE802154_SCF_SECLEVEL_MIC64 2 +#define IEEE802154_SCF_SECLEVEL_MIC128 3 +#define IEEE802154_SCF_SECLEVEL_ENC 4 +#define IEEE802154_SCF_SECLEVEL_ENC_MIC32 5 +#define IEEE802154_SCF_SECLEVEL_ENC_MIC64 6 +#define IEEE802154_SCF_SECLEVEL_ENC_MIC128 7 + +/* MAC footer size */ +#define IEEE802154_MFR_SIZE 2 /* 2 octets */ + +/* MAC's Command Frames Identifiers */ +#define IEEE802154_CMD_ASSOCIATION_REQ 0x01 +#define IEEE802154_CMD_ASSOCIATION_RESP 0x02 +#define IEEE802154_CMD_DISASSOCIATION_NOTIFY 0x03 +#define IEEE802154_CMD_DATA_REQ 0x04 +#define IEEE802154_CMD_PANID_CONFLICT_NOTIFY 0x05 +#define IEEE802154_CMD_ORPHAN_NOTIFY 0x06 +#define IEEE802154_CMD_BEACON_REQ 0x07 +#define IEEE802154_CMD_COORD_REALIGN_NOTIFY 
0x08 +#define IEEE802154_CMD_GTS_REQ 0x09 + +/* + * The return values of MAC operations + */ +enum { + /* + * The requested operation was completed successfully. + * For a transmission request, this value indicates + * a successful transmission. + */ + IEEE802154_SUCCESS = 0x0, + + /* The beacon was lost following a synchronization request. */ + IEEE802154_BEACON_LOSS = 0xe0, + /* + * A transmission could not take place due to activity on the + * channel, i.e., the CSMA-CA mechanism has failed. + */ + IEEE802154_CHNL_ACCESS_FAIL = 0xe1, + /* The GTS request has been denied by the PAN coordinator. */ + IEEE802154_DENINED = 0xe2, + /* The attempt to disable the transceiver has failed. */ + IEEE802154_DISABLE_TRX_FAIL = 0xe3, + /* + * The received frame induces a failed security check according to + * the security suite. + */ + IEEE802154_FAILED_SECURITY_CHECK = 0xe4, + /* + * The frame resulting from secure processing has a length that is + * greater than aMACMaxFrameSize. + */ + IEEE802154_FRAME_TOO_LONG = 0xe5, + /* + * The requested GTS transmission failed because the specified GTS + * either did not have a transmit GTS direction or was not defined. + */ + IEEE802154_INVALID_GTS = 0xe6, + /* + * A request to purge an MSDU from the transaction queue was made using + * an MSDU handle that was not found in the transaction table. + */ + IEEE802154_INVALID_HANDLE = 0xe7, + /* A parameter in the primitive is out of the valid range.*/ + IEEE802154_INVALID_PARAMETER = 0xe8, + /* No acknowledgment was received after aMaxFrameRetries. */ + IEEE802154_NO_ACK = 0xe9, + /* A scan operation failed to find any network beacons.*/ + IEEE802154_NO_BEACON = 0xea, + /* No response data were available following a request. */ + IEEE802154_NO_DATA = 0xeb, + /* The operation failed because a short address was not allocated. */ + IEEE802154_NO_SHORT_ADDRESS = 0xec, + /* + * A receiver enable request was unsuccessful because it could not be + * completed within the CAP. + */ + IEEE802154_OUT_OF_CAP = 0xed, + /* + * A PAN identifier conflict has been detected and communicated to the + * PAN coordinator. + */ + IEEE802154_PANID_CONFLICT = 0xee, + /* A coordinator realignment command has been received. */ + IEEE802154_REALIGMENT = 0xef, + /* The transaction has expired and its information discarded. */ + IEEE802154_TRANSACTION_EXPIRED = 0xf0, + /* There is no capacity to store the transaction. */ + IEEE802154_TRANSACTION_OVERFLOW = 0xf1, + /* + * The transceiver was in the transmitter enabled state when the + * receiver was requested to be enabled. + */ + IEEE802154_TX_ACTIVE = 0xf2, + /* The appropriate key is not available in the ACL. */ + IEEE802154_UNAVAILABLE_KEY = 0xf3, + /* + * A SET/GET request was issued with the identifier of a PIB attribute + * that is not supported. + */ + IEEE802154_UNSUPPORTED_ATTR = 0xf4, + /* + * A request to perform a scan operation failed because the MLME was + * in the process of performing a previously initiated scan operation. + */ + IEEE802154_SCAN_IN_PROGRESS = 0xfc, +}; + +/** + * ieee802154_is_valid_psdu_len - check if psdu len is valid + * @len: psdu len with (MHR + payload + MFR) + */ +static inline bool ieee802154_is_valid_psdu_len(const u8 len) +{ + return (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU); +} + +/** + * ieee802154_is_valid_psdu_len - check if extended addr is valid + * @addr: extended addr to check + */ +static inline bool ieee802154_is_valid_extended_addr(const __le64 addr) +{ + /* These EUI-64 addresses are reserved by IEEE. 
0xffffffffffffffff + * is used internally as extended to short address broadcast mapping. + * This is currently a workaround because neighbor discovery can't + * deal with short addresses types right now. + */ + return ((addr != cpu_to_le64(0x0000000000000000ULL)) && + (addr != cpu_to_le64(0xffffffffffffffffULL))); +} + +/** + * ieee802154_random_extended_addr - generates a random extended address + * @addr: extended addr pointer to place the random address + */ +static inline void ieee802154_random_extended_addr(__le64 *addr) +{ + get_random_bytes(addr, IEEE802154_EXTENDED_ADDR_LEN); + + /* toggle some bit if we hit an invalid extended addr */ + if (!ieee802154_is_valid_extended_addr(*addr)) + ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] ^= 0x01; +} + +#endif /* LINUX_IEEE802154_H */ diff --git a/include/linux/ima.h b/include/linux/ima.h index 7cf5e9b32550..120ccc53fcb7 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -15,7 +15,7 @@ struct linux_binprm; #ifdef CONFIG_IMA extern int ima_bprm_check(struct linux_binprm *bprm); -extern int ima_file_check(struct file *file, int mask); +extern int ima_file_check(struct file *file, int mask, int opened); extern void ima_file_free(struct file *file); extern int ima_file_mmap(struct file *file, unsigned long prot); extern int ima_module_check(struct file *file); @@ -27,7 +27,7 @@ static inline int ima_bprm_check(struct linux_binprm *bprm) return 0; } -static inline int ima_file_check(struct file *file, int mask) +static inline int ima_file_check(struct file *file, int mask, int opened) { return 0; } diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 2bb4c4f3531a..77fc43f8fb72 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -111,12 +111,21 @@ extern struct group_info init_groups; #ifdef CONFIG_PREEMPT_RCU #define INIT_TASK_RCU_PREEMPT(tsk) \ .rcu_read_lock_nesting = 0, \ - .rcu_read_unlock_special = 0, \ + .rcu_read_unlock_special.s = 0, \ .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \ INIT_TASK_RCU_TREE_PREEMPT() #else #define INIT_TASK_RCU_PREEMPT(tsk) #endif +#ifdef CONFIG_TASKS_RCU +#define INIT_TASK_RCU_TASKS(tsk) \ + .rcu_tasks_holdout = false, \ + .rcu_tasks_holdout_list = \ + LIST_HEAD_INIT(tsk.rcu_tasks_holdout_list), \ + .rcu_tasks_idle_cpu = -1, +#else +#define INIT_TASK_RCU_TASKS(tsk) +#endif extern struct cred init_cred; @@ -224,6 +233,7 @@ extern struct task_group root_task_group; INIT_FTRACE_GRAPH \ INIT_TRACE_RECURSION \ INIT_TASK_RCU_PREEMPT(tsk) \ + INIT_TASK_RCU_TASKS(tsk) \ INIT_CPUSET_SEQ(tsk) \ INIT_RT_MUTEXES(tsk) \ INIT_VTIME(tsk) \ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 698ad053d064..69517a24bc50 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -193,11 +193,6 @@ extern void irq_wake_thread(unsigned int irq, void *dev_id); /* The following three functions are for the core kernel use only. 
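Editor's aside showing the new <linux/ieee802154.h> helpers above in use; the driver-side function names are hypothetical:

#include <linux/ieee802154.h>

static bool example_rx_len_ok(u8 len)
{
	/* MHR + payload + MFR must fit between the PSDU minimum and the MTU. */
	return ieee802154_is_valid_psdu_len(len);
}

static __le64 example_pick_extended_addr(__le64 configured)
{
	__le64 addr = configured;

	/* Fall back to a random EUI-64 if the configured one is reserved. */
	if (!ieee802154_is_valid_extended_addr(addr))
		ieee802154_random_extended_addr(&addr);

	return addr;
}
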
*/ extern void suspend_device_irqs(void); extern void resume_device_irqs(void); -#ifdef CONFIG_PM_SLEEP -extern int check_wakeup_irqs(void); -#else -static inline int check_wakeup_irqs(void) { return 0; } -#endif /** * struct irq_affinity_notify - context for notification of IRQ affinity changes diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 20f9a527922a..e6a7c9ff72f2 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -57,8 +57,11 @@ struct iommu_domain { struct iommu_domain_geometry geometry; }; -#define IOMMU_CAP_CACHE_COHERENCY 0x1 -#define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */ +enum iommu_cap { + IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA + transactions */ + IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */ +}; /* * Following constraints are specifc to FSL_PAMUV1: @@ -80,6 +83,7 @@ enum iommu_attr { DOMAIN_ATTR_FSL_PAMU_STASH, DOMAIN_ATTR_FSL_PAMU_ENABLE, DOMAIN_ATTR_FSL_PAMUV1, + DOMAIN_ATTR_NESTING, /* two stages of translation */ DOMAIN_ATTR_MAX, }; @@ -94,7 +98,6 @@ enum iommu_attr { * @map: map a physically contiguous memory region to an iommu domain * @unmap: unmap a physically contiguous memory region from an iommu domain * @iova_to_phys: translate iova to physical address - * @domain_has_cap: domain capabilities query * @add_device: add device to iommu grouping * @remove_device: remove device from iommu grouping * @domain_get_attr: Query domain attributes @@ -102,6 +105,7 @@ enum iommu_attr { * @pgsize_bitmap: bitmap of supported page sizes */ struct iommu_ops { + bool (*capable)(enum iommu_cap); int (*domain_init)(struct iommu_domain *domain); void (*domain_destroy)(struct iommu_domain *domain); int (*attach_dev)(struct iommu_domain *domain, struct device *dev); @@ -111,8 +115,6 @@ struct iommu_ops { size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, size_t size); phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); - int (*domain_has_cap)(struct iommu_domain *domain, - unsigned long cap); int (*add_device)(struct device *dev); void (*remove_device)(struct device *dev); int (*device_group)(struct device *dev, unsigned int *groupid); @@ -142,6 +144,7 @@ struct iommu_ops { extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops); extern bool iommu_present(struct bus_type *bus); +extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap); extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus); extern struct iommu_group *iommu_group_get_by_id(int id); extern void iommu_domain_free(struct iommu_domain *domain); @@ -154,8 +157,6 @@ extern int iommu_map(struct iommu_domain *domain, unsigned long iova, extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size); extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); -extern int iommu_domain_has_cap(struct iommu_domain *domain, - unsigned long cap); extern void iommu_set_fault_handler(struct iommu_domain *domain, iommu_fault_handler_t handler, void *token); @@ -250,6 +251,11 @@ static inline bool iommu_present(struct bus_type *bus) return false; } +static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap) +{ + return false; +} + static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) { return NULL; @@ -304,12 +310,6 @@ static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_ad return 0; } -static inline int iommu_domain_has_cap(struct iommu_domain *domain, - 
unsigned long cap) -{ - return 0; -} - static inline void iommu_set_fault_handler(struct iommu_domain *domain, iommu_fault_handler_t handler, void *token) { diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 142ec544167c..2c5250222278 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -215,6 +215,11 @@ static inline int __deprecated check_region(resource_size_t s, /* Wrappers for managed devices */ struct device; + +extern int devm_request_resource(struct device *dev, struct resource *root, + struct resource *new); +extern void devm_release_resource(struct device *dev, struct resource *new); + #define devm_request_region(dev,start,n,name) \ __devm_request_region(dev, &ioport_resource, (start), (n), (name)) #define devm_request_mem_region(dev,start,n,name) \ diff --git a/include/linux/irq.h b/include/linux/irq.h index 62af59242ddc..03f48d936f66 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -173,6 +173,7 @@ struct irq_data { * IRQD_IRQ_DISABLED - Disabled state of the interrupt * IRQD_IRQ_MASKED - Masked state of the interrupt * IRQD_IRQ_INPROGRESS - In progress state of the interrupt + * IRQD_WAKEUP_ARMED - Wakeup mode armed */ enum { IRQD_TRIGGER_MASK = 0xf, @@ -186,6 +187,7 @@ enum { IRQD_IRQ_DISABLED = (1 << 16), IRQD_IRQ_MASKED = (1 << 17), IRQD_IRQ_INPROGRESS = (1 << 18), + IRQD_WAKEUP_ARMED = (1 << 19), }; static inline bool irqd_is_setaffinity_pending(struct irq_data *d) @@ -257,6 +259,12 @@ static inline bool irqd_irq_inprogress(struct irq_data *d) return d->state_use_accessors & IRQD_IRQ_INPROGRESS; } +static inline bool irqd_is_wakeup_armed(struct irq_data *d) +{ + return d->state_use_accessors & IRQD_WAKEUP_ARMED; +} + + /* * Functions for chained handlers which can be enabled/disabled by the * standard disable_irq/enable_irq calls. 
Must be called with diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index bf9422c3aefe..bf3fe719c7ce 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h @@ -39,9 +39,12 @@ bool irq_work_queue_on(struct irq_work *work, int cpu); #endif void irq_work_run(void); +void irq_work_tick(void); void irq_work_sync(struct irq_work *work); #ifdef CONFIG_IRQ_WORK +#include <asm/irq_work.h> + bool irq_work_needs_cpu(void); #else static inline bool irq_work_needs_cpu(void) { return false; } diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index 45e2d8c15bd2..13eed92c7d24 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -21,7 +21,11 @@ #define GIC_CPU_ACTIVEPRIO 0xd0 #define GIC_CPU_IDENT 0xfc +#define GICC_ENABLE 0x1 +#define GICC_INT_PRI_THRESHOLD 0xf0 #define GICC_IAR_INT_ID_MASK 0x3ff +#define GICC_INT_SPURIOUS 1023 +#define GICC_DIS_BYPASS_MASK 0x1e0 #define GIC_DIST_CTRL 0x000 #define GIC_DIST_CTR 0x004 @@ -39,6 +43,18 @@ #define GIC_DIST_SGI_PENDING_CLEAR 0xf10 #define GIC_DIST_SGI_PENDING_SET 0xf20 +#define GICD_ENABLE 0x1 +#define GICD_DISABLE 0x0 +#define GICD_INT_ACTLOW_LVLTRIG 0x0 +#define GICD_INT_EN_CLR_X32 0xffffffff +#define GICD_INT_EN_SET_SGI 0x0000ffff +#define GICD_INT_EN_CLR_PPI 0xffff0000 +#define GICD_INT_DEF_PRI 0xa0 +#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\ + (GICD_INT_DEF_PRI << 16) |\ + (GICD_INT_DEF_PRI << 8) |\ + GICD_INT_DEF_PRI) + #define GICH_HCR 0x0 #define GICH_VTR 0x4 #define GICH_VMCR 0x8 diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 472c021a2d4f..faf433af425e 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -12,6 +12,8 @@ struct irq_affinity_notify; struct proc_dir_entry; struct module; struct irq_desc; +struct irq_domain; +struct pt_regs; /** * struct irq_desc - interrupt descriptor @@ -36,6 +38,11 @@ struct irq_desc; * @threads_oneshot: bitfield to handle shared oneshot threads * @threads_active: number of irqaction threads currently running * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers + * @nr_actions: number of installed actions on this descriptor + * @no_suspend_depth: number of irqactions on a irq descriptor with + * IRQF_NO_SUSPEND set + * @force_resume_depth: number of irqactions on a irq descriptor with + * IRQF_FORCE_RESUME set * @dir: /proc/irq/ procfs entry * @name: flow handler name for /proc/interrupts output */ @@ -68,6 +75,11 @@ struct irq_desc { unsigned long threads_oneshot; atomic_t threads_active; wait_queue_head_t wait_for_threads; +#ifdef CONFIG_PM_SLEEP + unsigned int nr_actions; + unsigned int no_suspend_depth; + unsigned int force_resume_depth; +#endif #ifdef CONFIG_PROC_FS struct proc_dir_entry *dir; #endif @@ -118,6 +130,23 @@ static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *de int generic_handle_irq(unsigned int irq); +#ifdef CONFIG_HANDLE_DOMAIN_IRQ +/* + * Convert a HW interrupt number to a logical one using a IRQ domain, + * and handle the result interrupt number. Return -EINVAL if + * conversion failed. Providing a NULL domain indicates that the + * conversion has already been done. 
+ */ +int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq, + bool lookup, struct pt_regs *regs); + +static inline int handle_domain_irq(struct irq_domain *domain, + unsigned int hwirq, struct pt_regs *regs) +{ + return __handle_domain_irq(domain, hwirq, true, regs); +} +#endif + /* Test to see if a driver has successfully requested an irq */ static inline int irq_has_action(unsigned int irq) { diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 784304b222b3..98f923b6a0ea 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -8,28 +8,28 @@ * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com> * * Jump labels provide an interface to generate dynamic branches using - * self-modifying code. Assuming toolchain and architecture support the result - * of a "if (static_key_false(&key))" statement is a unconditional branch (which + * self-modifying code. Assuming toolchain and architecture support, the result + * of a "if (static_key_false(&key))" statement is an unconditional branch (which * defaults to false - and the true block is placed out of line). * * However at runtime we can change the branch target using * static_key_slow_{inc,dec}(). These function as a 'reference' count on the key - * object and for as long as there are references all branches referring to + * object, and for as long as there are references all branches referring to * that particular key will point to the (out of line) true block. * - * Since this relies on modifying code the static_key_slow_{inc,dec}() functions + * Since this relies on modifying code, the static_key_slow_{inc,dec}() functions * must be considered absolute slow paths (machine wide synchronization etc.). - * OTOH, since the affected branches are unconditional their runtime overhead + * OTOH, since the affected branches are unconditional, their runtime overhead * will be absolutely minimal, esp. in the default (off) case where the total * effect is a single NOP of appropriate size. The on case will patch in a jump * to the out-of-line block. * - * When the control is directly exposed to userspace it is prudent to delay the + * When the control is directly exposed to userspace, it is prudent to delay the * decrement to avoid high frequency code modifications which can (and do) * cause significant performance degradation. Struct static_key_deferred and * static_key_slow_dec_deferred() provide for this. * - * Lacking toolchain and or architecture support, it falls back to a simple + * Lacking toolchain and or architecture support, jump labels fall back to a simple * conditional branch. * * struct static_key my_key = STATIC_KEY_INIT_TRUE; @@ -43,8 +43,7 @@ * * Not initializing the key (static data is initialized to 0s anyway) is the * same as using STATIC_KEY_INIT_FALSE. 
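Editor's aside on the irqdesc.h addition above: with CONFIG_HANDLE_DOMAIN_IRQ a root irqchip driver can translate and handle a hardware irq in one call. A sketch under assumed names; the base address, domain pointer and the 0x10 pending register are made up, and on ARM the real entry point would also carry __exception_irq_entry:

#include <linux/io.h>
#include <linux/irqdesc.h>

static void __iomem *example_intc_base;
static struct irq_domain *example_intc_domain;

static void example_intc_handle_irq(struct pt_regs *regs)
{
	u32 hwirq = readl_relaxed(example_intc_base + 0x10);

	/* Looks up the Linux irq in the domain and runs its flow handler. */
	handle_domain_irq(example_intc_domain, hwirq, regs);
}
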
- * -*/ + */ #include <linux/types.h> #include <linux/compiler.h> diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 95624bed87ef..40728cf1c452 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -376,10 +376,6 @@ extern unsigned long simple_strtoul(const char *,char **,unsigned int); extern long simple_strtol(const char *,char **,unsigned int); extern unsigned long long simple_strtoull(const char *,char **,unsigned int); extern long long simple_strtoll(const char *,char **,unsigned int); -#define strict_strtoul kstrtoul -#define strict_strtol kstrtol -#define strict_strtoull kstrtoull -#define strict_strtoll kstrtoll extern int num_to_str(char *buf, int size, unsigned long long num); @@ -496,6 +492,7 @@ static inline char *hex_byte_pack_upper(char *buf, u8 byte) extern int hex_to_bin(char ch); extern int __must_check hex2bin(u8 *dst, const char *src, size_t count); +extern char *bin2hex(char *dst, const void *src, size_t count); bool mac_pton(const char *s, u8 *mac); @@ -715,23 +712,8 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } (void) (&_max1 == &_max2); \ _max1 > _max2 ? _max1 : _max2; }) -#define min3(x, y, z) ({ \ - typeof(x) _min1 = (x); \ - typeof(y) _min2 = (y); \ - typeof(z) _min3 = (z); \ - (void) (&_min1 == &_min2); \ - (void) (&_min1 == &_min3); \ - _min1 < _min2 ? (_min1 < _min3 ? _min1 : _min3) : \ - (_min2 < _min3 ? _min2 : _min3); }) - -#define max3(x, y, z) ({ \ - typeof(x) _max1 = (x); \ - typeof(y) _max2 = (y); \ - typeof(z) _max3 = (z); \ - (void) (&_max1 == &_max2); \ - (void) (&_max1 == &_max3); \ - _max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \ - (_max2 > _max3 ? _max2 : _max3); }) +#define min3(x, y, z) min((typeof(x))min(x, y), z) +#define max3(x, y, z) max((typeof(x))max(x, y), z) /** * min_not_zero - return the minimum that is _not_ zero, unless both are zero @@ -746,20 +728,13 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } /** * clamp - return a value clamped to a given range with strict typechecking * @val: current value - * @min: minimum allowable value - * @max: maximum allowable value + * @lo: lowest allowable value + * @hi: highest allowable value * - * This macro does strict typechecking of min/max to make sure they are of the + * This macro does strict typechecking of lo/hi to make sure they are of the * same type as val. See the unnecessary pointer comparisons. */ -#define clamp(val, min, max) ({ \ - typeof(val) __val = (val); \ - typeof(min) __min = (min); \ - typeof(max) __max = (max); \ - (void) (&__val == &__min); \ - (void) (&__val == &__max); \ - __val = __val < __min ? __min: __val; \ - __val > __max ? __max: __val; }) +#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) /* * ..and if you can't take the strict @@ -781,36 +756,26 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } * clamp_t - return a value clamped to a given range using a given type * @type: the type of variable to use * @val: current value - * @min: minimum allowable value - * @max: maximum allowable value + * @lo: minimum allowable value + * @hi: maximum allowable value * * This macro does no typechecking and uses temporary variables of type * 'type' to make all the comparisons. */ -#define clamp_t(type, val, min, max) ({ \ - type __val = (val); \ - type __min = (min); \ - type __max = (max); \ - __val = __val < __min ? __min: __val; \ - __val > __max ? 
__max: __val; }) +#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) /** * clamp_val - return a value clamped to a given range using val's type * @val: current value - * @min: minimum allowable value - * @max: maximum allowable value + * @lo: minimum allowable value + * @hi: maximum allowable value * * This macro does no typechecking and uses temporary variables of whatever * type the input argument 'val' is. This is useful when val is an unsigned * type and min and max are literals that will otherwise be assigned a signed * integer type. */ -#define clamp_val(val, min, max) ({ \ - typeof(val) __val = (val); \ - typeof(val) __min = (min); \ - typeof(val) __max = (max); \ - __val = __val < __min ? __min: __val; \ - __val > __max ? __max: __val; }) +#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) /* diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index ecbc52f9ff77..8422b4ed6882 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -44,8 +44,8 @@ DECLARE_PER_CPU(struct kernel_stat, kstat); DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat); /* Must have preemption disabled for this to be meaningful. */ -#define kstat_this_cpu (&__get_cpu_var(kstat)) -#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat)) +#define kstat_this_cpu this_cpu_ptr(&kstat) +#define kcpustat_this_cpu this_cpu_ptr(&kernel_cpustat) #define kstat_cpu(cpu) per_cpu(kstat, cpu) #define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu) diff --git a/include/linux/kernelcapi.h b/include/linux/kernelcapi.h index 9be37da93680..e985ba679c4a 100644 --- a/include/linux/kernelcapi.h +++ b/include/linux/kernelcapi.h @@ -41,7 +41,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN]); u16 capi20_get_version(u32 contr, struct capi_version *verp); u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN]); u16 capi20_get_profile(u32 contr, struct capi_profile *profp); -int capi20_manufacturer(unsigned int cmd, void __user *data); +int capi20_manufacturer(unsigned long cmd, void __user *data); #define CAPICTR_UP 0 #define CAPICTR_DOWN 1 diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 4b2a0e11cc5b..9d957b7ae095 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -178,6 +178,7 @@ struct kexec_buf { struct kimage *image; char *buffer; unsigned long bufsz; + unsigned long mem; unsigned long memsz; unsigned long buf_align; unsigned long buf_min; diff --git a/include/linux/key-type.h b/include/linux/key-type.h index 44792ee649de..ff9f1d394235 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -53,6 +53,24 @@ typedef int (*request_key_actor_t)(struct key_construction *key, const char *op, void *aux); /* + * Preparsed matching criterion. + */ +struct key_match_data { + /* Comparison function, defaults to exact description match, but can be + * overridden by type->match_preparse(). Should return true if a match + * is found and false if not. + */ + bool (*cmp)(const struct key *key, + const struct key_match_data *match_data); + + const void *raw_data; /* Raw match data */ + void *preparsed; /* For ->match_preparse() to stash stuff */ + unsigned lookup_type; /* Type of lookup for this search. */ +#define KEYRING_SEARCH_LOOKUP_DIRECT 0x0000 /* Direct lookup by description. */ +#define KEYRING_SEARCH_LOOKUP_ITERATE 0x0001 /* Iterative search. 
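Stepping back to the kernel.h hunk above: min3()/max3() and the clamp family are now composed from min()/max() rather than open-coded, with unchanged semantics. A small caller sketch, assuming only <linux/kernel.h> (scale_sample is a made-up function):

#include <linux/kernel.h>

static u8 scale_sample(int raw)
{
	int capped = min3(raw, 4095, 255);	/* now just min((int)min(raw, 4095), 255) */

	/* clamp() keeps the strict lo/hi typechecking; clamp_val() works from
	 * typeof(val), which helps when val is unsigned and the bounds are
	 * plain integer literals. */
	return clamp_val(capped, 0, 250);
}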
*/ +}; + +/* * kernel managed key type definition */ struct key_type { @@ -65,11 +83,6 @@ struct key_type { */ size_t def_datalen; - /* Default key search algorithm. */ - unsigned def_lookup_type; -#define KEYRING_SEARCH_LOOKUP_DIRECT 0x0000 /* Direct lookup by description. */ -#define KEYRING_SEARCH_LOOKUP_ITERATE 0x0001 /* Iterative search. */ - /* vet a description */ int (*vet_description)(const char *description); @@ -96,8 +109,15 @@ struct key_type { */ int (*update)(struct key *key, struct key_preparsed_payload *prep); - /* match a key against a description */ - int (*match)(const struct key *key, const void *desc); + /* Preparse the data supplied to ->match() (optional). The + * data to be preparsed can be found in match_data->raw_data. + * The lookup type can also be set by this function. + */ + int (*match_preparse)(struct key_match_data *match_data); + + /* Free preparsed match data (optional). This should be supplied it + * ->match_preparse() is supplied. */ + void (*match_free)(struct key_match_data *match_data); /* clear some of the data from a key on revokation (optional) * - the key's semaphore will be write-locked by the caller diff --git a/include/linux/libata.h b/include/linux/libata.h index 92abb497ab14..bd5fefeaf548 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -1404,14 +1404,14 @@ static inline int sata_srst_pmp(struct ata_link *link) * printk helpers */ __printf(3, 4) -int ata_port_printk(const struct ata_port *ap, const char *level, - const char *fmt, ...); +void ata_port_printk(const struct ata_port *ap, const char *level, + const char *fmt, ...); __printf(3, 4) -int ata_link_printk(const struct ata_link *link, const char *level, - const char *fmt, ...); +void ata_link_printk(const struct ata_link *link, const char *level, + const char *fmt, ...); __printf(3, 4) -int ata_dev_printk(const struct ata_device *dev, const char *level, - const char *fmt, ...); +void ata_dev_printk(const struct ata_device *dev, const char *level, + const char *fmt, ...); #define ata_port_err(ap, fmt, ...) \ ata_port_printk(ap, KERN_ERR, fmt, ##__VA_ARGS__) diff --git a/include/linux/list.h b/include/linux/list.h index cbbb96fcead9..f33f831eb3c8 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -5,6 +5,7 @@ #include <linux/stddef.h> #include <linux/poison.h> #include <linux/const.h> +#include <linux/kernel.h> /* * Simple doubly linked list implementation. diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index 219d79627c05..ff82a32871b5 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -178,7 +178,6 @@ struct nlm_block { unsigned char b_granted; /* VFS granted lock */ struct nlm_file * b_file; /* file in question */ struct cache_req * b_cache_req; /* deferred request handling */ - struct file_lock * b_fl; /* set for GETLK */ struct cache_deferred_req * b_deferred_req; unsigned int b_flags; /* block flags */ #define B_QUEUED 1 /* lock queued */ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 64c7425afbce..74ab23176e9b 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -4,7 +4,7 @@ * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> * - * see Documentation/lockdep-design.txt for more details. + * see Documentation/locking/lockdep-design.txt for more details. 
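For the key-type.h hunk above: with ->match() gone, a key type that wants anything other than an exact description match installs its own comparator from ->match_preparse(). A hypothetical sketch, assuming <linux/key-type.h> and <linux/string.h> (my_cmp and my_match_preparse are invented names, not part of the patch):

static bool my_cmp(const struct key *key,
		   const struct key_match_data *match_data)
{
	/* e.g. case-insensitive description matching */
	return strcasecmp(key->description, match_data->raw_data) == 0;
}

static int my_match_preparse(struct key_match_data *match_data)
{
	match_data->cmp = my_cmp;
	match_data->lookup_type = KEYRING_SEARCH_LOOKUP_ITERATE;
	return 0;
}

/* wired up via .match_preparse = my_match_preparse in the struct key_type */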
*/ #ifndef __LINUX_LOCKDEP_H #define __LINUX_LOCKDEP_H @@ -510,6 +510,7 @@ static inline void print_irqtrace_events(struct task_struct *curr) #define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_) #define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_) +#define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_) #define lock_map_release(l) lock_release(l, 1, _THIS_IP_) #ifdef CONFIG_PROVE_LOCKING diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index e0752d204d9e..19df5d857411 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -440,11 +440,6 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order); int memcg_cache_id(struct mem_cgroup *memcg); -int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, - struct kmem_cache *root_cache); -void memcg_free_cache_params(struct kmem_cache *s); - -int memcg_update_cache_size(struct kmem_cache *s, int num_groups); void memcg_update_array_size(int num_groups); struct kmem_cache * @@ -574,16 +569,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) return -1; } -static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg, - struct kmem_cache *s, struct kmem_cache *root_cache) -{ - return 0; -} - -static inline void memcg_free_cache_params(struct kmem_cache *s) -{ -} - static inline struct kmem_cache * memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) { diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index d9524c49d767..8f1a41951df9 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -84,6 +84,7 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages); extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); /* VM interface that may be used by firmware interface */ extern int online_pages(unsigned long, unsigned long, int); +extern int test_pages_in_a_zone(unsigned long, unsigned long); extern void __offline_isolated_pages(unsigned long, unsigned long); typedef void (*online_page_callback_t)(struct page *page); diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index f230a978e6ba..3d385c81c153 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -134,9 +134,10 @@ void mpol_free_shared_policy(struct shared_policy *p); struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx); -struct mempolicy *get_vma_policy(struct task_struct *tsk, - struct vm_area_struct *vma, unsigned long addr); -bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma); +struct mempolicy *get_task_policy(struct task_struct *p); +struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, + unsigned long addr); +bool vma_policy_mof(struct vm_area_struct *vma); extern void numa_default_policy(void); extern void numa_policy_init(void); diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h index dbd23c36de21..c0b075f6bc35 100644 --- a/include/linux/mfd/arizona/registers.h +++ b/include/linux/mfd/arizona/registers.h @@ -27,6 +27,7 @@ #define ARIZONA_WRITE_SEQUENCER_CTRL_0 0x16 #define ARIZONA_WRITE_SEQUENCER_CTRL_1 0x17 #define ARIZONA_WRITE_SEQUENCER_CTRL_2 0x18 +#define ARIZONA_WRITE_SEQUENCER_CTRL_3 0x19 #define ARIZONA_WRITE_SEQUENCER_PROM 0x1A #define ARIZONA_TONE_GENERATOR_1 0x20 #define ARIZONA_TONE_GENERATOR_2 0x21 @@ -70,7 +71,9 @@ #define ARIZONA_SAMPLE_RATE_3_STATUS 0x10C 
#define ARIZONA_ASYNC_CLOCK_1 0x112 #define ARIZONA_ASYNC_SAMPLE_RATE_1 0x113 +#define ARIZONA_ASYNC_SAMPLE_RATE_2 0x114 #define ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS 0x11B +#define ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS 0x11C #define ARIZONA_OUTPUT_SYSTEM_CLOCK 0x149 #define ARIZONA_OUTPUT_ASYNC_CLOCK 0x14A #define ARIZONA_RATE_ESTIMATOR_1 0x152 @@ -1664,16 +1667,30 @@ /* * R275 (0x113) - Async sample rate 1 */ -#define ARIZONA_ASYNC_SAMPLE_RATE_MASK 0x001F /* ASYNC_SAMPLE_RATE - [4:0] */ -#define ARIZONA_ASYNC_SAMPLE_RATE_SHIFT 0 /* ASYNC_SAMPLE_RATE - [4:0] */ -#define ARIZONA_ASYNC_SAMPLE_RATE_WIDTH 5 /* ASYNC_SAMPLE_RATE - [4:0] */ +#define ARIZONA_ASYNC_SAMPLE_RATE_1_MASK 0x001F /* ASYNC_SAMPLE_RATE_1 - [4:0] */ +#define ARIZONA_ASYNC_SAMPLE_RATE_1_SHIFT 0 /* ASYNC_SAMPLE_RATE_1 - [4:0] */ +#define ARIZONA_ASYNC_SAMPLE_RATE_1_WIDTH 5 /* ASYNC_SAMPLE_RATE_1 - [4:0] */ + +/* + * R276 (0x114) - Async sample rate 2 + */ +#define ARIZONA_ASYNC_SAMPLE_RATE_2_MASK 0x001F /* ASYNC_SAMPLE_RATE_2 - [4:0] */ +#define ARIZONA_ASYNC_SAMPLE_RATE_2_SHIFT 0 /* ASYNC_SAMPLE_RATE_2 - [4:0] */ +#define ARIZONA_ASYNC_SAMPLE_RATE_2_WIDTH 5 /* ASYNC_SAMPLE_RATE_2 - [4:0] */ /* * R283 (0x11B) - Async sample rate 1 status */ -#define ARIZONA_ASYNC_SAMPLE_RATE_STS_MASK 0x001F /* ASYNC_SAMPLE_RATE_STS - [4:0] */ -#define ARIZONA_ASYNC_SAMPLE_RATE_STS_SHIFT 0 /* ASYNC_SAMPLE_RATE_STS - [4:0] */ -#define ARIZONA_ASYNC_SAMPLE_RATE_STS_WIDTH 5 /* ASYNC_SAMPLE_RATE_STS - [4:0] */ +#define ARIZONA_ASYNC_SAMPLE_RATE_1_STS_MASK 0x001F /* ASYNC_SAMPLE_RATE_1_STS - [4:0] */ +#define ARIZONA_ASYNC_SAMPLE_RATE_1_STS_SHIFT 0 /* ASYNC_SAMPLE_RATE_1_STS - [4:0] */ +#define ARIZONA_ASYNC_SAMPLE_RATE_1_STS_WIDTH 5 /* ASYNC_SAMPLE_RATE_1_STS - [4:0] */ + +/* + * R284 (0x11C) - Async sample rate 2 status + */ +#define ARIZONA_ASYNC_SAMPLE_RATE_2_STS_MASK 0x001F /* ASYNC_SAMPLE_RATE_2_STS - [4:0] */ +#define ARIZONA_ASYNC_SAMPLE_RATE_2_STS_SHIFT 0 /* ASYNC_SAMPLE_RATE_2_STS - [4:0] */ +#define ARIZONA_ASYNC_SAMPLE_RATE_2_STS_WIDTH 5 /* ASYNC_SAMPLE_RATE_2_STS - [4:0] */ /* * R329 (0x149) - Output system clock diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index f543de91ce19..73e1709d4c09 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -44,6 +44,9 @@ struct mfd_cell { */ const char *of_compatible; + /* Matches ACPI PNP id, either _HID or _CID */ + const char *acpi_pnpid; + /* * These resources can be specified relative to the parent device. * For accessing hardware you should use resources from the platform dev diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index fcbe9d129a9d..0e166b92f5b4 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h @@ -62,10 +62,6 @@ struct cros_ec_command { * @dev: Device pointer * @was_wake_device: true if this device was set to wake the system from * sleep at the last suspend - * @cmd_xfer: send command to EC and get response - * Returns the number of bytes received if the communication succeeded, but - * that doesn't mean the EC was happy with the command. The caller - * should check msg.result for the EC's result code. * * @priv: Private data * @irq: Interrupt to use @@ -82,6 +78,10 @@ struct cros_ec_command { * @dout_size: size of dout buffer to allocate (zero to use static dout) * @parent: pointer to parent device (e.g. 
i2c or spi device) * @wake_enabled: true if this device can wake the system from sleep + * @cmd_xfer: send command to EC and get response + * Returns the number of bytes received if the communication succeeded, but + * that doesn't mean the EC was happy with the command. The caller + * should check msg.result for the EC's result code. * @lock: one transaction at a time */ struct cros_ec_device { @@ -92,8 +92,6 @@ struct cros_ec_device { struct device *dev; bool was_wake_device; struct class *cros_class; - int (*cmd_xfer)(struct cros_ec_device *ec, - struct cros_ec_command *msg); /* These are used to implement the platform-specific interface */ void *priv; @@ -104,6 +102,8 @@ struct cros_ec_device { int dout_size; struct device *parent; bool wake_enabled; + int (*cmd_xfer)(struct cros_ec_device *ec, + struct cros_ec_command *msg); struct mutex lock; }; @@ -153,6 +153,18 @@ int cros_ec_check_result(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); /** + * cros_ec_cmd_xfer - Send a command to the ChromeOS EC + * + * Call this to send a command to the ChromeOS EC. This should be used + * instead of calling the EC's cmd_xfer() callback directly. + * + * @ec_dev: EC device + * @msg: Message to write + */ +int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg); + +/** * cros_ec_remove - Remove a ChromeOS EC * * Call this to deregister a ChromeOS EC, then clean up any private data. diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h index 7853a6410d14..a49cd41feea7 100644 --- a/include/linux/mfd/cros_ec_commands.h +++ b/include/linux/mfd/cros_ec_commands.h @@ -1928,9 +1928,6 @@ struct ec_response_power_info { #define EC_CMD_I2C_PASSTHRU 0x9e -/* Slave address is 10 (not 7) bit */ -#define EC_I2C_FLAG_10BIT (1 << 16) - /* Read data; if not present, message is a write */ #define EC_I2C_FLAG_READ (1 << 15) diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h index bba65f51a0b5..c18a4c19d6fc 100644 --- a/include/linux/mfd/da9052/da9052.h +++ b/include/linux/mfd/da9052/da9052.h @@ -211,7 +211,7 @@ static inline int da9052_reg_update(struct da9052 *da9052, unsigned char reg, int da9052_device_init(struct da9052 *da9052, u8 chip_id); void da9052_device_exit(struct da9052 *da9052); -extern struct regmap_config da9052_regmap_config; +extern const struct regmap_config da9052_regmap_config; int da9052_irq_init(struct da9052 *da9052); int da9052_irq_exit(struct da9052 *da9052); diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h index 5166935ce66d..cb01496bfa49 100644 --- a/include/linux/mfd/davinci_voicecodec.h +++ b/include/linux/mfd/davinci_voicecodec.h @@ -21,7 +21,7 @@ */ #ifndef __LINUX_MFD_DAVINCI_VOICECODEC_H_ -#define __LINUX_MFD_DAVINIC_VOICECODEC_H_ +#define __LINUX_MFD_DAVINCI_VOICECODEC_H_ #include <linux/kernel.h> #include <linux/platform_device.h> diff --git a/include/linux/mfd/hi6421-pmic.h b/include/linux/mfd/hi6421-pmic.h new file mode 100644 index 000000000000..587273e35acf --- /dev/null +++ b/include/linux/mfd/hi6421-pmic.h @@ -0,0 +1,41 @@ +/* + * Header file for device driver Hi6421 PMIC + * + * Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd. + * http://www.hisilicon.com + * Copyright (c) <2013-2014> Linaro Ltd. 
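Back to the cros_ec.h hunk above: callers are now meant to go through cros_ec_cmd_xfer() instead of dereferencing ec_dev->cmd_xfer, and must check msg.result separately from the byte count. A hedged sketch; the cros_ec_command fields used here (command, indata, insize, result) and EC_RES_SUCCESS follow the contemporaneous definitions and are assumptions, as is my_send_ec_cmd:

static int my_send_ec_cmd(struct cros_ec_device *ec_dev, uint32_t cmd,
			  void *in, uint32_t insize)
{
	struct cros_ec_command msg = {
		.command = cmd,
		.indata  = in,
		.insize  = insize,
	};
	int ret;

	ret = cros_ec_cmd_xfer(ec_dev, &msg);	/* bytes received, or negative errno */
	if (ret < 0)
		return ret;

	/* a positive return only says the transfer worked; the EC's own
	 * verdict on the command is in msg.result */
	return msg.result == EC_RES_SUCCESS ? ret : -EIO;
}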
+ * http://www.linaro.org + * + * Author: Guodong Xu <guodong.xu@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __HI6421_PMIC_H +#define __HI6421_PMIC_H + +/* Hi6421 registers are mapped to memory bus in 4 bytes stride */ +#define HI6421_REG_TO_BUS_ADDR(x) (x << 2) + +/* Hi6421 maximum register number */ +#define HI6421_REG_MAX 0xFF + +/* Hi6421 OCP (over current protection) and DEB (debounce) control register */ +#define HI6421_OCP_DEB_CTRL_REG HI6421_REG_TO_BUS_ADDR(0x51) +#define HI6421_OCP_DEB_SEL_MASK 0x0C +#define HI6421_OCP_DEB_SEL_8MS 0x00 +#define HI6421_OCP_DEB_SEL_16MS 0x04 +#define HI6421_OCP_DEB_SEL_32MS 0x08 +#define HI6421_OCP_DEB_SEL_64MS 0x0C +#define HI6421_OCP_EN_DEBOUNCE_MASK 0x02 +#define HI6421_OCP_EN_DEBOUNCE_ENABLE 0x02 +#define HI6421_OCP_AUTO_STOP_MASK 0x01 +#define HI6421_OCP_AUTO_STOP_ENABLE 0x01 + +struct hi6421_pmic { + struct regmap *regmap; +}; + +#endif /* __HI6421_PMIC_H */ diff --git a/include/linux/mfd/max14577-private.h b/include/linux/mfd/max14577-private.h index 499253604026..f01c1fae4d84 100644 --- a/include/linux/mfd/max14577-private.h +++ b/include/linux/mfd/max14577-private.h @@ -72,15 +72,33 @@ enum max14577_muic_reg { MAX14577_MUIC_REG_END, }; +/* + * Combined charger types for max14577 and max77836. + * + * On max14577 three lower bits map to STATUS2/CHGTYP field. + * However the max77836 has different two last values of STATUS2/CHGTYP. + * To indicate the difference enum has two additional values for max77836. + * These values are just a register value bitwise OR with 0x8. + */ enum max14577_muic_charger_type { - MAX14577_CHARGER_TYPE_NONE = 0, - MAX14577_CHARGER_TYPE_USB, - MAX14577_CHARGER_TYPE_DOWNSTREAM_PORT, - MAX14577_CHARGER_TYPE_DEDICATED_CHG, - MAX14577_CHARGER_TYPE_SPECIAL_500MA, - MAX14577_CHARGER_TYPE_SPECIAL_1A, - MAX14577_CHARGER_TYPE_RESERVED, - MAX14577_CHARGER_TYPE_DEAD_BATTERY = 7, + MAX14577_CHARGER_TYPE_NONE = 0x0, + MAX14577_CHARGER_TYPE_USB = 0x1, + MAX14577_CHARGER_TYPE_DOWNSTREAM_PORT = 0x2, + MAX14577_CHARGER_TYPE_DEDICATED_CHG = 0x3, + MAX14577_CHARGER_TYPE_SPECIAL_500MA = 0x4, + /* Special 1A or 2A charger */ + MAX14577_CHARGER_TYPE_SPECIAL_1A = 0x5, + /* max14577: reserved, used on max77836 */ + MAX14577_CHARGER_TYPE_RESERVED = 0x6, + /* max14577: dead-battery charing with maximum current 100mA */ + MAX14577_CHARGER_TYPE_DEAD_BATTERY = 0x7, + /* + * max77836: special charger (bias on D+/D-), + * matches register value of 0x6 + */ + MAX77836_CHARGER_TYPE_SPECIAL_BIAS = 0xe, + /* max77836: reserved, register value 0x7 */ + MAX77836_CHARGER_TYPE_RESERVED = 0xf, }; /* MAX14577 interrupts */ @@ -121,13 +139,15 @@ enum max14577_muic_charger_type { #define STATUS2_CHGTYP_SHIFT 0 #define STATUS2_CHGDETRUN_SHIFT 3 #define STATUS2_DCDTMR_SHIFT 4 -#define STATUS2_DBCHG_SHIFT 5 +#define MAX14577_STATUS2_DBCHG_SHIFT 5 +#define MAX77836_STATUS2_DXOVP_SHIFT 5 #define STATUS2_VBVOLT_SHIFT 6 #define MAX77836_STATUS2_VIDRM_SHIFT 7 #define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT) #define STATUS2_CHGDETRUN_MASK BIT(STATUS2_CHGDETRUN_SHIFT) #define STATUS2_DCDTMR_MASK BIT(STATUS2_DCDTMR_SHIFT) -#define STATUS2_DBCHG_MASK BIT(STATUS2_DBCHG_SHIFT) +#define MAX14577_STATUS2_DBCHG_MASK BIT(MAX14577_STATUS2_DBCHG_SHIFT) +#define MAX77836_STATUS2_DXOVP_MASK BIT(MAX77836_STATUS2_DXOVP_SHIFT) #define STATUS2_VBVOLT_MASK BIT(STATUS2_VBVOLT_SHIFT) #define 
MAX77836_STATUS2_VIDRM_MASK BIT(MAX77836_STATUS2_VIDRM_SHIFT) @@ -177,9 +197,11 @@ enum max14577_muic_charger_type { #define CTRL3_JIGSET_SHIFT 0 #define CTRL3_BOOTSET_SHIFT 2 #define CTRL3_ADCDBSET_SHIFT 4 +#define CTRL3_WBTH_SHIFT 6 #define CTRL3_JIGSET_MASK (0x3 << CTRL3_JIGSET_SHIFT) #define CTRL3_BOOTSET_MASK (0x3 << CTRL3_BOOTSET_SHIFT) #define CTRL3_ADCDBSET_MASK (0x3 << CTRL3_ADCDBSET_SHIFT) +#define CTRL3_WBTH_MASK (0x3 << CTRL3_WBTH_SHIFT) /* Slave addr = 0x4A: Charger */ enum max14577_charger_reg { @@ -210,16 +232,20 @@ enum max14577_charger_reg { #define CDETCTRL1_CHGTYPMAN_SHIFT 1 #define CDETCTRL1_DCDEN_SHIFT 2 #define CDETCTRL1_DCD2SCT_SHIFT 3 -#define CDETCTRL1_DCHKTM_SHIFT 4 -#define CDETCTRL1_DBEXIT_SHIFT 5 +#define MAX14577_CDETCTRL1_DCHKTM_SHIFT 4 +#define MAX77836_CDETCTRL1_CDLY_SHIFT 4 +#define MAX14577_CDETCTRL1_DBEXIT_SHIFT 5 +#define MAX77836_CDETCTRL1_DCDCPL_SHIFT 5 #define CDETCTRL1_DBIDLE_SHIFT 6 #define CDETCTRL1_CDPDET_SHIFT 7 #define CDETCTRL1_CHGDETEN_MASK BIT(CDETCTRL1_CHGDETEN_SHIFT) #define CDETCTRL1_CHGTYPMAN_MASK BIT(CDETCTRL1_CHGTYPMAN_SHIFT) #define CDETCTRL1_DCDEN_MASK BIT(CDETCTRL1_DCDEN_SHIFT) #define CDETCTRL1_DCD2SCT_MASK BIT(CDETCTRL1_DCD2SCT_SHIFT) -#define CDETCTRL1_DCHKTM_MASK BIT(CDETCTRL1_DCHKTM_SHIFT) -#define CDETCTRL1_DBEXIT_MASK BIT(CDETCTRL1_DBEXIT_SHIFT) +#define MAX14577_CDETCTRL1_DCHKTM_MASK BIT(MAX14577_CDETCTRL1_DCHKTM_SHIFT) +#define MAX77836_CDETCTRL1_CDDLY_MASK BIT(MAX77836_CDETCTRL1_CDDLY_SHIFT) +#define MAX14577_CDETCTRL1_DBEXIT_MASK BIT(MAX14577_CDETCTRL1_DBEXIT_SHIFT) +#define MAX77836_CDETCTRL1_DCDCPL_MASK BIT(MAX77836_CDETCTRL1_DCDCPL_SHIFT) #define CDETCTRL1_DBIDLE_MASK BIT(CDETCTRL1_DBIDLE_SHIFT) #define CDETCTRL1_CDPDET_MASK BIT(CDETCTRL1_CDPDET_SHIFT) @@ -255,17 +281,36 @@ enum max14577_charger_reg { #define CHGCTRL7_OTPCGHCVS_SHIFT 0 #define CHGCTRL7_OTPCGHCVS_MASK (0x3 << CHGCTRL7_OTPCGHCVS_SHIFT) -/* MAX14577 regulator current limits (as in CHGCTRL4 register), uA */ -#define MAX14577_REGULATOR_CURRENT_LIMIT_MIN 90000 -#define MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START 200000 -#define MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP 50000 -#define MAX14577_REGULATOR_CURRENT_LIMIT_MAX 950000 - -/* MAX77836 regulator current limits (as in CHGCTRL4 register), uA */ -#define MAX77836_REGULATOR_CURRENT_LIMIT_MIN 45000 -#define MAX77836_REGULATOR_CURRENT_LIMIT_HIGH_START 100000 -#define MAX77836_REGULATOR_CURRENT_LIMIT_HIGH_STEP 25000 -#define MAX77836_REGULATOR_CURRENT_LIMIT_MAX 475000 +/* MAX14577 charger current limits (as in CHGCTRL4 register), uA */ +#define MAX14577_CHARGER_CURRENT_LIMIT_MIN 90000U +#define MAX14577_CHARGER_CURRENT_LIMIT_HIGH_START 200000U +#define MAX14577_CHARGER_CURRENT_LIMIT_HIGH_STEP 50000U +#define MAX14577_CHARGER_CURRENT_LIMIT_MAX 950000U + +/* MAX77836 charger current limits (as in CHGCTRL4 register), uA */ +#define MAX77836_CHARGER_CURRENT_LIMIT_MIN 45000U +#define MAX77836_CHARGER_CURRENT_LIMIT_HIGH_START 100000U +#define MAX77836_CHARGER_CURRENT_LIMIT_HIGH_STEP 25000U +#define MAX77836_CHARGER_CURRENT_LIMIT_MAX 475000U + +/* + * MAX14577 charger End-Of-Charge current limits + * (as in CHGCTRL5 register), uA + */ +#define MAX14577_CHARGER_EOC_CURRENT_LIMIT_MIN 50000U +#define MAX14577_CHARGER_EOC_CURRENT_LIMIT_STEP 10000U +#define MAX14577_CHARGER_EOC_CURRENT_LIMIT_MAX 200000U + +/* + * MAX14577/MAX77836 Battery Constant Voltage + * (as in CHGCTRL3 register), uV + */ +#define MAXIM_CHARGER_CONSTANT_VOLTAGE_MIN 4000000U +#define MAXIM_CHARGER_CONSTANT_VOLTAGE_STEP 20000U +#define 
MAXIM_CHARGER_CONSTANT_VOLTAGE_MAX 4350000U + +/* Default value for fast charge timer, in hours */ +#define MAXIM_CHARGER_FAST_CHARGE_TIMER_DEFAULT 5 /* MAX14577 regulator SFOUT LDO voltage, fixed, uV */ #define MAX14577_REGULATOR_SAFEOUT_VOLTAGE 4900000 diff --git a/include/linux/mfd/max14577.h b/include/linux/mfd/max14577.h index c83fbed1c7b6..ccfaf952c31b 100644 --- a/include/linux/mfd/max14577.h +++ b/include/linux/mfd/max14577.h @@ -54,6 +54,13 @@ struct max14577_regulator_platform_data { struct device_node *of_node; }; +struct max14577_charger_platform_data { + u32 constant_uvolt; + u32 fast_charge_uamp; + u32 eoc_uamp; + u32 ovp_uvolt; +}; + /* * MAX14577 MFD platform data */ @@ -74,4 +81,27 @@ struct max14577_platform_data { struct max14577_regulator_platform_data *regulators; }; +/* + * Valid limits of current for max14577 and max77836 chargers. + * They must correspond to MBCICHWRCL and MBCICHWRCH fields in CHGCTRL4 + * register for given chipset. + */ +struct maxim_charger_current { + /* Minimal current, set in CHGCTRL4/MBCICHWRCL, uA */ + unsigned int min; + /* + * Minimal current when high setting is active, + * set in CHGCTRL4/MBCICHWRCH, uA + */ + unsigned int high_start; + /* Value of one step in high setting, uA */ + unsigned int high_step; + /* Maximum current of high setting, uA */ + unsigned int max; +}; + +extern const struct maxim_charger_current maxim_charger_currents[]; +extern int maxim_charger_calc_reg_current(const struct maxim_charger_current *limits, + unsigned int min_ua, unsigned int max_ua, u8 *dst); + #endif /* __MAX14577_H__ */ diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h index d0e578fd7053..fc17d56581b2 100644 --- a/include/linux/mfd/max77693-private.h +++ b/include/linux/mfd/max77693-private.h @@ -46,7 +46,7 @@ enum max77693_pmic_reg { MAX77693_LED_REG_VOUT_FLASH2 = 0x0C, MAX77693_LED_REG_FLASH_INT = 0x0E, MAX77693_LED_REG_FLASH_INT_MASK = 0x0F, - MAX77693_LED_REG_FLASH_INT_STATUS = 0x10, + MAX77693_LED_REG_FLASH_STATUS = 0x10, MAX77693_PMIC_REG_PMIC_ID1 = 0x20, MAX77693_PMIC_REG_PMIC_ID2 = 0x21, @@ -85,6 +85,65 @@ enum max77693_pmic_reg { MAX77693_PMIC_REG_END, }; +/* MAX77693 ITORCH register */ +#define TORCH_IOUT1_SHIFT 0 +#define TORCH_IOUT2_SHIFT 4 +#define TORCH_IOUT_MIN 15625 +#define TORCH_IOUT_MAX 250000 +#define TORCH_IOUT_STEP 15625 + +/* MAX77693 IFLASH1 and IFLASH2 registers */ +#define FLASH_IOUT_MIN 15625 +#define FLASH_IOUT_MAX_1LED 1000000 +#define FLASH_IOUT_MAX_2LEDS 625000 +#define FLASH_IOUT_STEP 15625 + +/* MAX77693 TORCH_TIMER register */ +#define TORCH_TMR_NO_TIMER 0x40 +#define TORCH_TIMEOUT_MIN 262000 +#define TORCH_TIMEOUT_MAX 15728000 + +/* MAX77693 FLASH_TIMER register */ +#define FLASH_TMR_LEVEL 0x80 +#define FLASH_TIMEOUT_MIN 62500 +#define FLASH_TIMEOUT_MAX 1000000 +#define FLASH_TIMEOUT_STEP 62500 + +/* MAX77693 FLASH_EN register */ +#define FLASH_EN_OFF 0x0 +#define FLASH_EN_FLASH 0x1 +#define FLASH_EN_TORCH 0x2 +#define FLASH_EN_ON 0x3 +#define FLASH_EN_SHIFT(x) (6 - ((x) - 1) * 2) +#define TORCH_EN_SHIFT(x) (2 - ((x) - 1) * 2) + +/* MAX77693 MAX_FLASH1 register */ +#define MAX_FLASH1_MAX_FL_EN 0x80 +#define MAX_FLASH1_VSYS_MIN 2400 +#define MAX_FLASH1_VSYS_MAX 3400 +#define MAX_FLASH1_VSYS_STEP 33 + +/* MAX77693 VOUT_CNTL register */ +#define FLASH_BOOST_FIXED 0x04 +#define FLASH_BOOST_LEDNUM_2 0x80 + +/* MAX77693 VOUT_FLASH1 register */ +#define FLASH_VOUT_MIN 3300 +#define FLASH_VOUT_MAX 5500 +#define FLASH_VOUT_STEP 25 +#define FLASH_VOUT_RMIN 0x0c + +/* MAX77693 
FLASH_STATUS register */ +#define FLASH_STATUS_FLASH_ON BIT(3) +#define FLASH_STATUS_TORCH_ON BIT(2) + +/* MAX77693 FLASH_INT register */ +#define FLASH_INT_FLED2_OPEN BIT(0) +#define FLASH_INT_FLED2_SHORT BIT(1) +#define FLASH_INT_FLED1_OPEN BIT(2) +#define FLASH_INT_FLED1_SHORT BIT(3) +#define FLASH_INT_OVER_CURRENT BIT(4) + /* MAX77693 CHG_CNFG_00 register */ #define CHG_CNFG_00_CHG_MASK 0x1 #define CHG_CNFG_00_BUCK_MASK 0x4 diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h index 3f3dc45f93ee..f0b6585cd874 100644 --- a/include/linux/mfd/max77693.h +++ b/include/linux/mfd/max77693.h @@ -63,6 +63,45 @@ struct max77693_muic_platform_data { int path_uart; }; +/* MAX77693 led flash */ + +/* triggers */ +enum max77693_led_trigger { + MAX77693_LED_TRIG_OFF, + MAX77693_LED_TRIG_FLASH, + MAX77693_LED_TRIG_TORCH, + MAX77693_LED_TRIG_EXT, + MAX77693_LED_TRIG_SOFT, +}; + +/* trigger types */ +enum max77693_led_trigger_type { + MAX77693_LED_TRIG_TYPE_EDGE, + MAX77693_LED_TRIG_TYPE_LEVEL, +}; + +/* boost modes */ +enum max77693_led_boost_mode { + MAX77693_LED_BOOST_NONE, + MAX77693_LED_BOOST_ADAPTIVE, + MAX77693_LED_BOOST_FIXED, +}; + +struct max77693_led_platform_data { + u32 fleds[2]; + u32 iout_torch[2]; + u32 iout_flash[2]; + u32 trigger[2]; + u32 trigger_type[2]; + u32 num_leds; + u32 boost_mode; + u32 flash_timeout; + u32 boost_vout; + u32 low_vsys; +}; + +/* MAX77693 */ + struct max77693_platform_data { /* regulator data */ struct max77693_regulator_data *regulators; @@ -70,5 +109,6 @@ struct max77693_platform_data { /* muic data */ struct max77693_muic_platform_data *muic_data; + struct max77693_led_platform_data *led_data; }; #endif /* __LINUX_MFD_MAX77693_H */ diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h new file mode 100644 index 000000000000..fb09312d854b --- /dev/null +++ b/include/linux/mfd/rk808.h @@ -0,0 +1,196 @@ +/* + * rk808.h for Rockchip RK808 + * + * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd + * + * Author: Chris Zhong <zyw@rock-chips.com> + * Author: Zhang Qing <zhangqing@rock-chips.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __LINUX_REGULATOR_rk808_H +#define __LINUX_REGULATOR_rk808_H + +#include <linux/regulator/machine.h> +#include <linux/regmap.h> + +/* + * rk808 Global Register Map. 
+ */ + +#define RK808_DCDC1 0 /* (0+RK808_START) */ +#define RK808_LDO1 4 /* (4+RK808_START) */ +#define RK808_NUM_REGULATORS 14 + +enum rk808_reg { + RK808_ID_DCDC1, + RK808_ID_DCDC2, + RK808_ID_DCDC3, + RK808_ID_DCDC4, + RK808_ID_LDO1, + RK808_ID_LDO2, + RK808_ID_LDO3, + RK808_ID_LDO4, + RK808_ID_LDO5, + RK808_ID_LDO6, + RK808_ID_LDO7, + RK808_ID_LDO8, + RK808_ID_SWITCH1, + RK808_ID_SWITCH2, +}; + +#define RK808_SECONDS_REG 0x00 +#define RK808_MINUTES_REG 0x01 +#define RK808_HOURS_REG 0x02 +#define RK808_DAYS_REG 0x03 +#define RK808_MONTHS_REG 0x04 +#define RK808_YEARS_REG 0x05 +#define RK808_WEEKS_REG 0x06 +#define RK808_ALARM_SECONDS_REG 0x08 +#define RK808_ALARM_MINUTES_REG 0x09 +#define RK808_ALARM_HOURS_REG 0x0a +#define RK808_ALARM_DAYS_REG 0x0b +#define RK808_ALARM_MONTHS_REG 0x0c +#define RK808_ALARM_YEARS_REG 0x0d +#define RK808_RTC_CTRL_REG 0x10 +#define RK808_RTC_STATUS_REG 0x11 +#define RK808_RTC_INT_REG 0x12 +#define RK808_RTC_COMP_LSB_REG 0x13 +#define RK808_RTC_COMP_MSB_REG 0x14 +#define RK808_CLK32OUT_REG 0x20 +#define RK808_VB_MON_REG 0x21 +#define RK808_THERMAL_REG 0x22 +#define RK808_DCDC_EN_REG 0x23 +#define RK808_LDO_EN_REG 0x24 +#define RK808_SLEEP_SET_OFF_REG1 0x25 +#define RK808_SLEEP_SET_OFF_REG2 0x26 +#define RK808_DCDC_UV_STS_REG 0x27 +#define RK808_DCDC_UV_ACT_REG 0x28 +#define RK808_LDO_UV_STS_REG 0x29 +#define RK808_LDO_UV_ACT_REG 0x2a +#define RK808_DCDC_PG_REG 0x2b +#define RK808_LDO_PG_REG 0x2c +#define RK808_VOUT_MON_TDB_REG 0x2d +#define RK808_BUCK1_CONFIG_REG 0x2e +#define RK808_BUCK1_ON_VSEL_REG 0x2f +#define RK808_BUCK1_SLP_VSEL_REG 0x30 +#define RK808_BUCK1_DVS_VSEL_REG 0x31 +#define RK808_BUCK2_CONFIG_REG 0x32 +#define RK808_BUCK2_ON_VSEL_REG 0x33 +#define RK808_BUCK2_SLP_VSEL_REG 0x34 +#define RK808_BUCK2_DVS_VSEL_REG 0x35 +#define RK808_BUCK3_CONFIG_REG 0x36 +#define RK808_BUCK4_CONFIG_REG 0x37 +#define RK808_BUCK4_ON_VSEL_REG 0x38 +#define RK808_BUCK4_SLP_VSEL_REG 0x39 +#define RK808_BOOST_CONFIG_REG 0x3a +#define RK808_LDO1_ON_VSEL_REG 0x3b +#define RK808_LDO1_SLP_VSEL_REG 0x3c +#define RK808_LDO2_ON_VSEL_REG 0x3d +#define RK808_LDO2_SLP_VSEL_REG 0x3e +#define RK808_LDO3_ON_VSEL_REG 0x3f +#define RK808_LDO3_SLP_VSEL_REG 0x40 +#define RK808_LDO4_ON_VSEL_REG 0x41 +#define RK808_LDO4_SLP_VSEL_REG 0x42 +#define RK808_LDO5_ON_VSEL_REG 0x43 +#define RK808_LDO5_SLP_VSEL_REG 0x44 +#define RK808_LDO6_ON_VSEL_REG 0x45 +#define RK808_LDO6_SLP_VSEL_REG 0x46 +#define RK808_LDO7_ON_VSEL_REG 0x47 +#define RK808_LDO7_SLP_VSEL_REG 0x48 +#define RK808_LDO8_ON_VSEL_REG 0x49 +#define RK808_LDO8_SLP_VSEL_REG 0x4a +#define RK808_DEVCTRL_REG 0x4b +#define RK808_INT_STS_REG1 0x4c +#define RK808_INT_STS_MSK_REG1 0x4d +#define RK808_INT_STS_REG2 0x4e +#define RK808_INT_STS_MSK_REG2 0x4f +#define RK808_IO_POL_REG 0x50 + +/* IRQ Definitions */ +#define RK808_IRQ_VOUT_LO 0 +#define RK808_IRQ_VB_LO 1 +#define RK808_IRQ_PWRON 2 +#define RK808_IRQ_PWRON_LP 3 +#define RK808_IRQ_HOTDIE 4 +#define RK808_IRQ_RTC_ALARM 5 +#define RK808_IRQ_RTC_PERIOD 6 +#define RK808_IRQ_PLUG_IN_INT 7 +#define RK808_IRQ_PLUG_OUT_INT 8 +#define RK808_NUM_IRQ 9 + +#define RK808_IRQ_VOUT_LO_MSK BIT(0) +#define RK808_IRQ_VB_LO_MSK BIT(1) +#define RK808_IRQ_PWRON_MSK BIT(2) +#define RK808_IRQ_PWRON_LP_MSK BIT(3) +#define RK808_IRQ_HOTDIE_MSK BIT(4) +#define RK808_IRQ_RTC_ALARM_MSK BIT(5) +#define RK808_IRQ_RTC_PERIOD_MSK BIT(6) +#define RK808_IRQ_PLUG_IN_INT_MSK BIT(0) +#define RK808_IRQ_PLUG_OUT_INT_MSK BIT(1) + +#define RK808_VBAT_LOW_2V8 0x00 +#define RK808_VBAT_LOW_2V9 0x01 +#define 
RK808_VBAT_LOW_3V0 0x02 +#define RK808_VBAT_LOW_3V1 0x03 +#define RK808_VBAT_LOW_3V2 0x04 +#define RK808_VBAT_LOW_3V3 0x05 +#define RK808_VBAT_LOW_3V4 0x06 +#define RK808_VBAT_LOW_3V5 0x07 +#define VBAT_LOW_VOL_MASK (0x07 << 0) +#define EN_VABT_LOW_SHUT_DOWN (0x00 << 4) +#define EN_VBAT_LOW_IRQ (0x1 << 4) +#define VBAT_LOW_ACT_MASK (0x1 << 4) + +#define BUCK_ILMIN_MASK (7 << 0) +#define BOOST_ILMIN_MASK (7 << 0) +#define BUCK1_RATE_MASK (3 << 3) +#define BUCK2_RATE_MASK (3 << 3) +#define MASK_ALL 0xff + +#define SWITCH2_EN BIT(6) +#define SWITCH1_EN BIT(5) +#define DEV_OFF_RST BIT(3) + +#define VB_LO_ACT BIT(4) +#define VB_LO_SEL_3500MV (7 << 0) + +#define VOUT_LO_INT BIT(0) +#define CLK32KOUT2_EN BIT(0) + +enum { + BUCK_ILMIN_50MA, + BUCK_ILMIN_100MA, + BUCK_ILMIN_150MA, + BUCK_ILMIN_200MA, + BUCK_ILMIN_250MA, + BUCK_ILMIN_300MA, + BUCK_ILMIN_350MA, + BUCK_ILMIN_400MA, +}; + +enum { + BOOST_ILMIN_75MA, + BOOST_ILMIN_100MA, + BOOST_ILMIN_125MA, + BOOST_ILMIN_150MA, + BOOST_ILMIN_175MA, + BOOST_ILMIN_200MA, + BOOST_ILMIN_225MA, + BOOST_ILMIN_250MA, +}; + +struct rk808 { + struct i2c_client *i2c; + struct regmap_irq_chip_data *irq_data; + struct regmap *regmap; +}; +#endif /* __LINUX_REGULATOR_rk808_H */ diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h new file mode 100644 index 000000000000..c72d5344f3b3 --- /dev/null +++ b/include/linux/mfd/rn5t618.h @@ -0,0 +1,228 @@ +/* + * MFD core driver for Ricoh RN5T618 PMIC + * + * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
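Looking back at the new rk808.h header above: sub-drivers (regulator, RTC, ...) are expected to reach the chip through the shared regmap carried in struct rk808. A minimal sketch, assuming <linux/mfd/rk808.h> (rk808_read_seconds is a made-up helper; the register returns the raw RTC value):

static int rk808_read_seconds(struct rk808 *rk808, unsigned int *secs)
{
	/* all MFD cells share the parent's regmap */
	return regmap_read(rk808->regmap, RK808_SECONDS_REG, secs);
}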
+ */ + +#ifndef __LINUX_MFD_RN5T618_H +#define __LINUX_MFD_RN5T618_H + +#include <linux/regmap.h> + +#define RN5T618_LSIVER 0x00 +#define RN5T618_OTPVER 0x01 +#define RN5T618_IODAC 0x02 +#define RN5T618_VINDAC 0x03 +#define RN5T618_CPUCNT 0x06 +#define RN5T618_PSWR 0x07 +#define RN5T618_PONHIS 0x09 +#define RN5T618_POFFHIS 0x0a +#define RN5T618_WATCHDOG 0x0b +#define RN5T618_WATCHDOGCNT 0x0c +#define RN5T618_PWRFUNC 0x0d +#define RN5T618_SLPCNT 0x0e +#define RN5T618_REPCNT 0x0f +#define RN5T618_PWRONTIMSET 0x10 +#define RN5T618_NOETIMSETCNT 0x11 +#define RN5T618_PWRIREN 0x12 +#define RN5T618_PWRIRQ 0x13 +#define RN5T618_PWRMON 0x14 +#define RN5T618_PWRIRSEL 0x15 +#define RN5T618_DC1_SLOT 0x16 +#define RN5T618_DC2_SLOT 0x17 +#define RN5T618_DC3_SLOT 0x18 +#define RN5T618_LDO1_SLOT 0x1b +#define RN5T618_LDO2_SLOT 0x1c +#define RN5T618_LDO3_SLOT 0x1d +#define RN5T618_LDO4_SLOT 0x1e +#define RN5T618_LDO5_SLOT 0x1f +#define RN5T618_PSO0_SLOT 0x25 +#define RN5T618_PSO1_SLOT 0x26 +#define RN5T618_PSO2_SLOT 0x27 +#define RN5T618_PSO3_SLOT 0x28 +#define RN5T618_LDORTC1_SLOT 0x2a +#define RN5T618_DC1CTL 0x2c +#define RN5T618_DC1CTL2 0x2d +#define RN5T618_DC2CTL 0x2e +#define RN5T618_DC2CTL2 0x2f +#define RN5T618_DC3CTL 0x30 +#define RN5T618_DC3CTL2 0x31 +#define RN5T618_DC1DAC 0x36 +#define RN5T618_DC2DAC 0x37 +#define RN5T618_DC3DAC 0x38 +#define RN5T618_DC1DAC_SLP 0x3b +#define RN5T618_DC2DAC_SLP 0x3c +#define RN5T618_DC3DAC_SLP 0x3d +#define RN5T618_DCIREN 0x40 +#define RN5T618_DCIRQ 0x41 +#define RN5T618_DCIRMON 0x42 +#define RN5T618_LDOEN1 0x44 +#define RN5T618_LDOEN2 0x45 +#define RN5T618_LDODIS 0x46 +#define RN5T618_LDO1DAC 0x4c +#define RN5T618_LDO2DAC 0x4d +#define RN5T618_LDO3DAC 0x4e +#define RN5T618_LDO4DAC 0x4f +#define RN5T618_LDO5DAC 0x50 +#define RN5T618_LDORTCDAC 0x56 +#define RN5T618_LDORTC2DAC 0x57 +#define RN5T618_LDO1DAC_SLP 0x58 +#define RN5T618_LDO2DAC_SLP 0x59 +#define RN5T618_LDO3DAC_SLP 0x5a +#define RN5T618_LDO4DAC_SLP 0x5b +#define RN5T618_LDO5DAC_SLP 0x5c +#define RN5T618_ADCCNT1 0x64 +#define RN5T618_ADCCNT2 0x65 +#define RN5T618_ADCCNT3 0x66 +#define RN5T618_ILIMDATAH 0x68 +#define RN5T618_ILIMDATAL 0x69 +#define RN5T618_VBATDATAH 0x6a +#define RN5T618_VBATDATAL 0x6b +#define RN5T618_VADPDATAH 0x6c +#define RN5T618_VADPDATAL 0x6d +#define RN5T618_VUSBDATAH 0x6e +#define RN5T618_VUSBDATAL 0x6f +#define RN5T618_VSYSDATAH 0x70 +#define RN5T618_VSYSDATAL 0x71 +#define RN5T618_VTHMDATAH 0x72 +#define RN5T618_VTHMDATAL 0x73 +#define RN5T618_AIN1DATAH 0x74 +#define RN5T618_AIN1DATAL 0x75 +#define RN5T618_AIN0DATAH 0x76 +#define RN5T618_AIN0DATAL 0x77 +#define RN5T618_ILIMTHL 0x78 +#define RN5T618_ILIMTHH 0x79 +#define RN5T618_VBATTHL 0x7a +#define RN5T618_VBATTHH 0x7b +#define RN5T618_VADPTHL 0x7c +#define RN5T618_VADPTHH 0x7d +#define RN5T618_VUSBTHL 0x7e +#define RN5T618_VUSBTHH 0x7f +#define RN5T618_VSYSTHL 0x80 +#define RN5T618_VSYSTHH 0x81 +#define RN5T618_VTHMTHL 0x82 +#define RN5T618_VTHMTHH 0x83 +#define RN5T618_AIN1THL 0x84 +#define RN5T618_AIN1THH 0x85 +#define RN5T618_AIN0THL 0x86 +#define RN5T618_AIN0THH 0x87 +#define RN5T618_EN_ADCIR1 0x88 +#define RN5T618_EN_ADCIR2 0x89 +#define RN5T618_EN_ADCIR3 0x8a +#define RN5T618_IR_ADC1 0x8c +#define RN5T618_IR_ADC2 0x8d +#define RN5T618_IR_ADC3 0x8e +#define RN5T618_IOSEL 0x90 +#define RN5T618_IOOUT 0x91 +#define RN5T618_GPEDGE1 0x92 +#define RN5T618_GPEDGE2 0x93 +#define RN5T618_EN_GPIR 0x94 +#define RN5T618_IR_GPR 0x95 +#define RN5T618_IR_GPF 0x96 +#define RN5T618_MON_IOIN 0x97 +#define RN5T618_GPLED_FUNC 0x98 +#define 
RN5T618_INTPOL 0x9c +#define RN5T618_INTEN 0x9d +#define RN5T618_INTMON 0x9e +#define RN5T618_PREVINDAC 0xb0 +#define RN5T618_BATDAC 0xb1 +#define RN5T618_CHGCTL1 0xb3 +#define RN5T618_CHGCTL2 0xb4 +#define RN5T618_VSYSSET 0xb5 +#define RN5T618_REGISET1 0xb6 +#define RN5T618_REGISET2 0xb7 +#define RN5T618_CHGISET 0xb8 +#define RN5T618_TIMSET 0xb9 +#define RN5T618_BATSET1 0xba +#define RN5T618_BATSET2 0xbb +#define RN5T618_DIESET 0xbc +#define RN5T618_CHGSTATE 0xbd +#define RN5T618_CHGCTRL_IRFMASK 0xbe +#define RN5T618_CHGSTAT_IRFMASK1 0xbf +#define RN5T618_CHGSTAT_IRFMASK2 0xc0 +#define RN5T618_CHGERR_IRFMASK 0xc1 +#define RN5T618_CHGCTRL_IRR 0xc2 +#define RN5T618_CHGSTAT_IRR1 0xc3 +#define RN5T618_CHGSTAT_IRR2 0xc4 +#define RN5T618_CHGERR_IRR 0xc5 +#define RN5T618_CHGCTRL_MONI 0xc6 +#define RN5T618_CHGSTAT_MONI1 0xc7 +#define RN5T618_CHGSTAT_MONI2 0xc8 +#define RN5T618_CHGERR_MONI 0xc9 +#define RN5T618_CHGCTRL_DETMOD1 0xca +#define RN5T618_CHGCTRL_DETMOD2 0xcb +#define RN5T618_CHGSTAT_DETMOD1 0xcc +#define RN5T618_CHGSTAT_DETMOD2 0xcd +#define RN5T618_CHGSTAT_DETMOD3 0xce +#define RN5T618_CHGERR_DETMOD1 0xcf +#define RN5T618_CHGERR_DETMOD2 0xd0 +#define RN5T618_CHGOSCCTL 0xd4 +#define RN5T618_CHGOSCSCORESET1 0xd5 +#define RN5T618_CHGOSCSCORESET2 0xd6 +#define RN5T618_CHGOSCSCORESET3 0xd7 +#define RN5T618_CHGOSCFREQSET1 0xd8 +#define RN5T618_CHGOSCFREQSET2 0xd9 +#define RN5T618_CONTROL 0xe0 +#define RN5T618_SOC 0xe1 +#define RN5T618_RE_CAP_H 0xe2 +#define RN5T618_RE_CAP_L 0xe3 +#define RN5T618_FA_CAP_H 0xe4 +#define RN5T618_FA_CAP_L 0xe5 +#define RN5T618_AGE 0xe6 +#define RN5T618_TT_EMPTY_H 0xe7 +#define RN5T618_TT_EMPTY_L 0xe8 +#define RN5T618_TT_FULL_H 0xe9 +#define RN5T618_TT_FULL_L 0xea +#define RN5T618_VOLTAGE_1 0xeb +#define RN5T618_VOLTAGE_0 0xec +#define RN5T618_TEMP_1 0xed +#define RN5T618_TEMP_0 0xee +#define RN5T618_CC_CTRL 0xef +#define RN5T618_CC_COUNT2 0xf0 +#define RN5T618_CC_COUNT1 0xf1 +#define RN5T618_CC_COUNT0 0xf2 +#define RN5T618_CC_SUMREG3 0xf3 +#define RN5T618_CC_SUMREG2 0xf4 +#define RN5T618_CC_SUMREG1 0xf5 +#define RN5T618_CC_SUMREG0 0xf6 +#define RN5T618_CC_OFFREG1 0xf7 +#define RN5T618_CC_OFFREG0 0xf8 +#define RN5T618_CC_GAINREG1 0xf9 +#define RN5T618_CC_GAINREG0 0xfa +#define RN5T618_CC_AVEREG1 0xfb +#define RN5T618_CC_AVEREG0 0xfc +#define RN5T618_MAX_REG 0xfc + +#define RN5T618_REPCNT_REPWRON BIT(0) +#define RN5T618_SLPCNT_SWPWROFF BIT(0) +#define RN5T618_WATCHDOG_WDOGEN BIT(2) +#define RN5T618_WATCHDOG_WDOGTIM_M (BIT(0) | BIT(1)) +#define RN5T618_WATCHDOG_WDOGTIM_S 0 +#define RN5T618_PWRIRQ_IR_WDOG BIT(6) + +enum { + RN5T618_DCDC1, + RN5T618_DCDC2, + RN5T618_DCDC3, + RN5T618_LDO1, + RN5T618_LDO2, + RN5T618_LDO3, + RN5T618_LDO4, + RN5T618_LDO5, + RN5T618_LDORTC1, + RN5T618_LDORTC2, + RN5T618_REG_NUM, +}; + +struct rn5t618 { + struct regmap *regmap; +}; + +#endif /* __LINUX_MFD_RN5T618_H */ diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h index fb96c84dada5..e2e70053470e 100644 --- a/include/linux/mfd/ti_am335x_tscadc.h +++ b/include/linux/mfd/ti_am335x_tscadc.h @@ -155,6 +155,7 @@ struct ti_tscadc_dev { void __iomem *tscadc_base; int irq; int used_cells; /* 1-2 */ + int tsc_wires; int tsc_cell; /* -1 if not used */ int adc_cell; /* -1 if not used */ struct mfd_cell cells[TSCADC_CELLS]; diff --git a/include/linux/mfd/ti_ssp.h b/include/linux/mfd/ti_ssp.h deleted file mode 100644 index dbb4b43bd20e..000000000000 --- a/include/linux/mfd/ti_ssp.h +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Sequencer Serial Port (SSP) driver for 
Texas Instruments' SoCs - * - * Copyright (C) 2010 Texas Instruments Inc - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __TI_SSP_H__ -#define __TI_SSP_H__ - -struct ti_ssp_dev_data { - const char *dev_name; - void *pdata; - size_t pdata_size; -}; - -struct ti_ssp_data { - unsigned long out_clock; - struct ti_ssp_dev_data dev_data[2]; -}; - -struct ti_ssp_spi_data { - unsigned long iosel; - int num_cs; - void (*select)(int cs); -}; - -/* - * Sequencer port IO pin configuration bits. These do not correlate 1-1 with - * the hardware. The iosel field in the port data combines iosel1 and iosel2, - * and is therefore not a direct map to register space. It is best to use the - * macros below to construct iosel values. - * - * least significant 16 bits --> iosel1 - * most significant 16 bits --> iosel2 - */ - -#define SSP_IN 0x0000 -#define SSP_DATA 0x0001 -#define SSP_CLOCK 0x0002 -#define SSP_CHIPSEL 0x0003 -#define SSP_OUT 0x0004 -#define SSP_PIN_SEL(pin, v) ((v) << ((pin) * 3)) -#define SSP_PIN_MASK(pin) SSP_PIN_SEL(pin, 0x7) -#define SSP_INPUT_SEL(pin) ((pin) << 16) - -/* Sequencer port config bits */ -#define SSP_EARLY_DIN BIT(8) -#define SSP_DELAY_DOUT BIT(9) - -/* Sequence map definitions */ -#define SSP_CLK_HIGH BIT(0) -#define SSP_CLK_LOW 0 -#define SSP_DATA_HIGH BIT(1) -#define SSP_DATA_LOW 0 -#define SSP_CS_HIGH BIT(2) -#define SSP_CS_LOW 0 -#define SSP_OUT_MODE BIT(3) -#define SSP_IN_MODE 0 -#define SSP_DATA_REG BIT(4) -#define SSP_ADDR_REG 0 - -#define SSP_OPCODE_DIRECT ((0x0) << 5) -#define SSP_OPCODE_TOGGLE ((0x1) << 5) -#define SSP_OPCODE_SHIFT ((0x2) << 5) -#define SSP_OPCODE_BRANCH0 ((0x4) << 5) -#define SSP_OPCODE_BRANCH1 ((0x5) << 5) -#define SSP_OPCODE_BRANCH ((0x6) << 5) -#define SSP_OPCODE_STOP ((0x7) << 5) -#define SSP_BRANCH(addr) ((addr) << 8) -#define SSP_COUNT(cycles) ((cycles) << 8) - -int ti_ssp_raw_read(struct device *dev); -int ti_ssp_raw_write(struct device *dev, u32 val); -int ti_ssp_load(struct device *dev, int offs, u32* prog, int len); -int ti_ssp_run(struct device *dev, u32 pc, u32 input, u32 *output); -int ti_ssp_set_mode(struct device *dev, int mode); -int ti_ssp_set_iosel(struct device *dev, u32 iosel); - -#endif /* __TI_SSP_H__ */ diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index 8f6f2e91e7ae..57388171610d 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h @@ -5,6 +5,7 @@ #include <linux/fb.h> #include <linux/io.h> #include <linux/jiffies.h> +#include <linux/mmc/card.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> @@ -83,6 +84,27 @@ */ #define TMIO_MMC_HAVE_HIGH_REG (1 << 6) +/* + * Some controllers have CMD12 automatically + * issue/non-issue register + */ +#define TMIO_MMC_HAVE_CMD12_CTRL (1 << 7) + +/* + * Some controllers needs to set 1 on SDIO status reserved bits + */ +#define TMIO_MMC_SDIO_STATUS_QUIRK (1 << 
8) + +/* + * Some controllers have DMA enable/disable register + */ +#define TMIO_MMC_HAVE_CTL_DMA_REG (1 << 9) + +/* + * Some controllers allows to set SDx actual clock + */ +#define TMIO_MMC_CLK_ACTUAL (1 << 10) + int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base); int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base); void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state); @@ -96,6 +118,7 @@ struct tmio_mmc_dma { int slave_id_tx; int slave_id_rx; int alignment_shift; + dma_addr_t dma_rx_offset; bool (*filter)(struct dma_chan *chan, void *arg); }; @@ -120,6 +143,8 @@ struct tmio_mmc_data { /* clock management callbacks */ int (*clk_enable)(struct platform_device *pdev, unsigned int *f); void (*clk_disable)(struct platform_device *pdev); + int (*multi_io_quirk)(struct mmc_card *card, + unsigned int direction, int blk_size); }; /* diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h index 95d6938737fd..ac7fba44d7e4 100644 --- a/include/linux/mfd/tps65217.h +++ b/include/linux/mfd/tps65217.h @@ -60,6 +60,8 @@ #define TPS65217_REG_SEQ5 0X1D #define TPS65217_REG_SEQ6 0X1E +#define TPS65217_REG_MAX TPS65217_REG_SEQ6 + /* Register field definitions */ #define TPS65217_CHIPID_CHIP_MASK 0xF0 #define TPS65217_CHIPID_REV_MASK 0x0F diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index 2e5b194b9b19..53d33dee70e1 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h @@ -37,6 +37,7 @@ /* struct phy_device dev_flags definitions */ #define MICREL_PHY_50MHZ_CLK 0x00000001 +#define MICREL_PHY_25MHZ_CLK 0x00000002 #define MICREL_KSZ9021_EXTREG_CTRL 0xB #define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC diff --git a/include/linux/migrate.h b/include/linux/migrate.h index a2901c414664..01aad3ed89ec 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -13,18 +13,9 @@ typedef void free_page_t(struct page *page, unsigned long private); * Return values from addresss_space_operations.migratepage(): * - negative errno on page migration failure; * - zero on page migration success; - * - * The balloon page migration introduces this special case where a 'distinct' - * return code is used to flag a successful page migration to unmap_and_move(). - * This approach is necessary because page migration can race against balloon - * deflation procedure, and for such case we could introduce a nasty page leak - * if a successfully migrated balloon page gets released concurrently with - * migration's unmap_and_move() wrap-up steps. */ #define MIGRATEPAGE_SUCCESS 0 -#define MIGRATEPAGE_BALLOON_SUCCESS 1 /* special ret code for balloon page - * sucessful migration case. 
- */ + enum migrate_reason { MR_COMPACTION, MR_MEMORY_FAILURE, @@ -82,9 +73,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping, return -ENOSYS; } -/* Possible settings for the migrate_page() method in address_operations */ -#define migrate_page NULL - #endif /* CONFIG_MIGRATION */ #ifdef CONFIG_NUMA_BALANCING diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 7c4c0f1f5805..3fa075daeb1d 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -40,6 +40,15 @@ #define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 5) #define MLX5_DIF_SIZE 8 #define MLX5_STRIDE_BLOCK_OP 0x400 +#define MLX5_CPY_GRD_MASK 0xc0 +#define MLX5_CPY_APP_MASK 0x30 +#define MLX5_CPY_REF_MASK 0x0f +#define MLX5_BSF_INC_REFTAG (1 << 6) +#define MLX5_BSF_INL_VALID (1 << 15) +#define MLX5_BSF_REFRESH_DIF (1 << 14) +#define MLX5_BSF_REPEAT_BLOCK (1 << 7) +#define MLX5_BSF_APPTAG_ESCAPE 0x1 +#define MLX5_BSF_APPREF_ESCAPE 0x2 enum mlx5_qp_optpar { MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, @@ -287,6 +296,22 @@ struct mlx5_wqe_inline_seg { __be32 byte_count; }; +enum mlx5_sig_type { + MLX5_DIF_CRC = 0x1, + MLX5_DIF_IPCS = 0x2, +}; + +struct mlx5_bsf_inl { + __be16 vld_refresh; + __be16 dif_apptag; + __be32 dif_reftag; + u8 sig_type; + u8 rp_inv_seed; + u8 rsvd[3]; + u8 dif_inc_ref_guard_check; + __be16 dif_app_bitmask_check; +}; + struct mlx5_bsf { struct mlx5_bsf_basic { u8 bsf_size_sbs; @@ -310,14 +335,8 @@ struct mlx5_bsf { __be32 w_tfs_psv; __be32 m_tfs_psv; } ext; - struct mlx5_bsf_inl { - __be32 w_inl_vld; - __be32 w_rsvd; - __be64 w_block_format; - __be32 m_inl_vld; - __be32 m_rsvd; - __be64 m_block_format; - } inl; + struct mlx5_bsf_inl w_inl; + struct mlx5_bsf_inl m_inl; }; struct mlx5_klm { diff --git a/include/linux/mm.h b/include/linux/mm.h index 0f4196a0bc20..02d11ee7f19d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -18,6 +18,7 @@ #include <linux/pfn.h> #include <linux/bit_spinlock.h> #include <linux/shrinker.h> +#include <linux/resource.h> struct mempolicy; struct anon_vma; @@ -346,6 +347,7 @@ static inline int put_page_unless_one(struct page *page) } extern int page_is_ram(unsigned long pfn); +extern int region_is_ram(resource_size_t phys_addr, unsigned long size); /* Support for virtually mapped pages */ struct page *vmalloc_to_page(const void *addr); @@ -553,6 +555,25 @@ static inline void __ClearPageBuddy(struct page *page) atomic_set(&page->_mapcount, -1); } +#define PAGE_BALLOON_MAPCOUNT_VALUE (-256) + +static inline int PageBalloon(struct page *page) +{ + return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE; +} + +static inline void __SetPageBalloon(struct page *page) +{ + VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); + atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE); +} + +static inline void __ClearPageBalloon(struct page *page) +{ + VM_BUG_ON_PAGE(!PageBalloon(page), page); + atomic_set(&page->_mapcount, -1); +} + void put_page(struct page *page); void put_pages_list(struct list_head *pages); @@ -1247,8 +1268,8 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma, !vma_growsup(vma->vm_next, addr); } -extern pid_t -vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group); +extern struct task_struct *task_of_stack(struct task_struct *task, + struct vm_area_struct *vma, bool in_group); extern unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, @@ -1780,6 +1801,20 @@ extern struct vm_area_struct 
*copy_vma(struct vm_area_struct **, bool *need_rmap_locks); extern void exit_mmap(struct mm_struct *); +static inline int check_data_rlimit(unsigned long rlim, + unsigned long new, + unsigned long start, + unsigned long end_data, + unsigned long start_data) +{ + if (rlim < RLIM_INFINITY) { + if (((new - start) + (end_data - start_data)) > rlim) + return -ENOSPC; + } + + return 0; +} + extern int mm_take_all_locks(struct mm_struct *mm); extern void mm_drop_all_locks(struct mm_struct *mm); @@ -1939,11 +1974,16 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm, #ifdef CONFIG_MMU pgprot_t vm_get_page_prot(unsigned long vm_flags); +void vma_set_page_prot(struct vm_area_struct *vma); #else static inline pgprot_t vm_get_page_prot(unsigned long vm_flags) { return __pgprot(0); } +static inline void vma_set_page_prot(struct vm_area_struct *vma) +{ + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); +} #endif #ifdef CONFIG_NUMA_BALANCING diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index d424b9de3aff..b0692d28f8e6 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -42,7 +42,8 @@ struct mmc_csd { unsigned int read_partial:1, read_misalign:1, write_partial:1, - write_misalign:1; + write_misalign:1, + dsr_imp:1; }; struct mmc_ext_csd { @@ -74,7 +75,7 @@ struct mmc_ext_csd { unsigned int sec_trim_mult; /* Secure trim multiplier */ unsigned int sec_erase_mult; /* Secure erase multiplier */ unsigned int trim_timeout; /* In milliseconds */ - bool enhanced_area_en; /* enable bit */ + bool partition_setting_completed; /* enable bit */ unsigned long long enhanced_area_offset; /* Units: Byte */ unsigned int enhanced_area_size; /* Units: KB */ unsigned int cache_size; /* Units: KB */ @@ -214,11 +215,12 @@ enum mmc_blk_status { }; /* The number of MMC physical partitions. These consist of: - * boot partitions (2), general purpose partitions (4) in MMC v4.4. + * boot partitions (2), general purpose partitions (4) and + * RPMB partition (1) in MMC v4.4. */ #define MMC_NUM_BOOT_PARTITION 2 #define MMC_NUM_GP_PARTITION 4 -#define MMC_NUM_PHY_PARTITION 6 +#define MMC_NUM_PHY_PARTITION 7 #define MAX_MMC_PART_NAME_LEN 20 /* diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h index 29ce014ab421..001366927cf4 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h @@ -26,6 +26,8 @@ enum dw_mci_state { STATE_DATA_BUSY, STATE_SENDING_STOP, STATE_DATA_ERROR, + STATE_SENDING_CMD11, + STATE_WAITING_CMD11_DONE, }; enum { @@ -188,7 +190,7 @@ struct dw_mci { /* Workaround flags */ u32 quirks; - struct regulator *vmmc; /* Power regulator */ + bool vqmmc_enabled; unsigned long irq_flags; /* IRQ flags */ int irq; }; diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 7960424d0bc0..df0c15396bbf 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -42,6 +42,7 @@ struct mmc_ios { #define MMC_POWER_OFF 0 #define MMC_POWER_UP 1 #define MMC_POWER_ON 2 +#define MMC_POWER_UNDEFINED 3 unsigned char bus_width; /* data bus width */ @@ -139,6 +140,13 @@ struct mmc_host_ops { int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv); void (*hw_reset)(struct mmc_host *host); void (*card_event)(struct mmc_host *host); + + /* + * Optional callback to support controllers with HW issues for multiple + * I/O. Returns the number of supported blocks for the request. 
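The check_data_rlimit() helper added to mm.h above folds the RLIMIT_DATA test into one place; roughly, a caller such as brk() hands it the proposed new break together with the current data-segment bounds. A sketch only, not the exact mm/mmap.c code:

static int my_check_new_brk(struct mm_struct *mm, unsigned long newbrk)
{
	/* -ENOSPC when (newbrk - start_brk) + (end_data - start_data)
	 * would exceed the caller's RLIMIT_DATA */
	return check_data_rlimit(rlimit(RLIMIT_DATA), newbrk,
				 mm->start_brk, mm->end_data, mm->start_data);
}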
+ */ + int (*multi_io_quirk)(struct mmc_card *card, + unsigned int direction, int blk_size); }; struct mmc_card; @@ -265,7 +273,6 @@ struct mmc_host { #define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */ #define MMC_CAP2_FULL_PWR_CYCLE (1 << 2) /* Can do full power cycle */ -#define MMC_CAP2_NO_MULTI_READ (1 << 3) /* Multiblock reads don't work */ #define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */ #define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */ #define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \ @@ -365,6 +372,9 @@ struct mmc_host { unsigned int slotno; /* used for sdio acpi binding */ + int dsr_req; /* DSR value is valid */ + u32 dsr; /* optional driver stage (DSR) value */ + unsigned long private[0] ____cacheline_aligned; }; diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index 64ec963ed347..1cd00b3a75b9 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -53,6 +53,11 @@ #define MMC_SEND_TUNING_BLOCK 19 /* adtc R1 */ #define MMC_SEND_TUNING_BLOCK_HS200 21 /* adtc R1 */ +#define MMC_TUNING_BLK_PATTERN_4BIT_SIZE 64 +#define MMC_TUNING_BLK_PATTERN_8BIT_SIZE 128 +extern const u8 tuning_blk_pattern_4bit[MMC_TUNING_BLK_PATTERN_4BIT_SIZE]; +extern const u8 tuning_blk_pattern_8bit[MMC_TUNING_BLK_PATTERN_8BIT_SIZE]; + /* class 3 */ #define MMC_WRITE_DAT_UNTIL_STOP 20 /* adtc [31:0] data addr R1 */ @@ -281,6 +286,7 @@ struct _mmc_csd { #define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */ #define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */ #define EXT_CSD_GP_SIZE_MULT 143 /* R/W */ +#define EXT_CSD_PARTITION_SETTING_COMPLETED 155 /* R/W */ #define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */ #define EXT_CSD_PARTITION_SUPPORT 160 /* RO */ #define EXT_CSD_HPI_MGMT 161 /* R/W */ @@ -349,6 +355,7 @@ struct _mmc_csd { #define EXT_CSD_PART_CONFIG_ACC_RPMB (0x3) #define EXT_CSD_PART_CONFIG_ACC_GP0 (0x4) +#define EXT_CSD_PART_SETTING_COMPLETED (0x1) #define EXT_CSD_PART_SUPPORT_PART_EN (0x1) #define EXT_CSD_CMD_SET_NORMAL (1<<0) diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h index 09ebe57d5ce9..dba793e3a331 100644 --- a/include/linux/mmc/sdhci.h +++ b/include/linux/mmc/sdhci.h @@ -98,6 +98,8 @@ struct sdhci_host { #define SDHCI_QUIRK2_BROKEN_HS200 (1<<6) /* Controller does not support DDR50 */ #define SDHCI_QUIRK2_BROKEN_DDR50 (1<<7) +/* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */ +#define SDHCI_QUIRK2_STOP_WITH_TC (1<<8) int irq; /* Device IRQ */ void __iomem *ioaddr; /* Mapped address */ @@ -146,6 +148,7 @@ struct sdhci_host { struct mmc_command *cmd; /* Current command */ struct mmc_data *data; /* Current data request */ unsigned int data_early:1; /* Data finished before cmd */ + unsigned int busy_handle:1; /* Handling the order of Busy-end */ struct sg_mapping_iter sg_miter; /* SG state for PIO */ unsigned int blocks; /* remaining PIO blocks */ diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h index d2433381e828..e56fa24c9322 100644 --- a/include/linux/mmc/slot-gpio.h +++ b/include/linux/mmc/slot-gpio.h @@ -24,7 +24,10 @@ void mmc_gpio_free_cd(struct mmc_host *host); int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, unsigned int idx, bool override_active_level, - unsigned int debounce); + unsigned int debounce, bool *gpio_invert); +int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, + unsigned int idx, bool override_active_level, + unsigned int debounce, bool *gpio_invert); void mmc_gpiod_free_cd(struct mmc_host *host); void 
mmc_gpiod_request_cd_irq(struct mmc_host *host); diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 2f348d02f640..877ef226f90f 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -4,10 +4,14 @@ #include <linux/stringify.h> struct page; +struct vm_area_struct; +struct mm_struct; extern void dump_page(struct page *page, const char *reason); extern void dump_page_badflags(struct page *page, const char *reason, unsigned long badflags); +void dump_vma(const struct vm_area_struct *vma); +void dump_mm(const struct mm_struct *mm); #ifdef CONFIG_DEBUG_VM #define VM_BUG_ON(cond) BUG_ON(cond) @@ -18,12 +22,28 @@ extern void dump_page_badflags(struct page *page, const char *reason, BUG(); \ } \ } while (0) +#define VM_BUG_ON_VMA(cond, vma) \ + do { \ + if (unlikely(cond)) { \ + dump_vma(vma); \ + BUG(); \ + } \ + } while (0) +#define VM_BUG_ON_MM(cond, mm) \ + do { \ + if (unlikely(cond)) { \ + dump_mm(mm); \ + BUG(); \ + } \ + } while (0) #define VM_WARN_ON(cond) WARN_ON(cond) #define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond) #define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format) #else #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) +#define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond) +#define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond) #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 318df7051850..48bf12ef6620 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -521,13 +521,13 @@ struct zone { atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; } ____cacheline_internodealigned_in_smp; -typedef enum { +enum zone_flags { ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */ ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */ ZONE_CONGESTED, /* zone has many dirty pages backed by * a congested BDI */ - ZONE_TAIL_LRU_DIRTY, /* reclaim scanning has recently found + ZONE_DIRTY, /* reclaim scanning has recently found * many dirty file pages at the tail * of the LRU. 
*/ @@ -535,52 +535,7 @@ typedef enum { * many pages under writeback */ ZONE_FAIR_DEPLETED, /* fair zone policy batch depleted */ -} zone_flags_t; - -static inline void zone_set_flag(struct zone *zone, zone_flags_t flag) -{ - set_bit(flag, &zone->flags); -} - -static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag) -{ - return test_and_set_bit(flag, &zone->flags); -} - -static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag) -{ - clear_bit(flag, &zone->flags); -} - -static inline int zone_is_reclaim_congested(const struct zone *zone) -{ - return test_bit(ZONE_CONGESTED, &zone->flags); -} - -static inline int zone_is_reclaim_dirty(const struct zone *zone) -{ - return test_bit(ZONE_TAIL_LRU_DIRTY, &zone->flags); -} - -static inline int zone_is_reclaim_writeback(const struct zone *zone) -{ - return test_bit(ZONE_WRITEBACK, &zone->flags); -} - -static inline int zone_is_reclaim_locked(const struct zone *zone) -{ - return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags); -} - -static inline int zone_is_fair_depleted(const struct zone *zone) -{ - return test_bit(ZONE_FAIR_DEPLETED, &zone->flags); -} - -static inline int zone_is_oom_locked(const struct zone *zone) -{ - return test_bit(ZONE_OOM_LOCKED, &zone->flags); -} +}; static inline unsigned long zone_end_pfn(const struct zone *zone) { diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index b43f4752304e..1c9effa25e26 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -78,6 +78,8 @@ struct kernel_param { }; }; +extern const struct kernel_param __start___param[], __stop___param[]; + /* Special one for strings we want to copy into */ struct kparam_string { unsigned int maxlen; diff --git a/include/linux/msi.h b/include/linux/msi.h index 8103f32f6d87..44f4746d033b 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -29,7 +29,6 @@ struct msi_desc { __u8 multi_cap : 3; /* log2 num of messages supported */ __u8 maskbit : 1; /* mask-pending bit supported ? 
*/ __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ - __u8 pos; /* Location of the msi capability */ __u16 entry_nr; /* specific enabled entry */ unsigned default_irq; /* default pre-assigned irq */ } msi_attrib; @@ -47,8 +46,6 @@ struct msi_desc { /* Last set MSI message */ struct msi_msg msg; - - struct kobject kobj; }; /* @@ -60,7 +57,6 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc); void arch_teardown_msi_irq(unsigned int irq); int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); void arch_teardown_msi_irqs(struct pci_dev *dev); -int arch_msi_check_device(struct pci_dev* dev, int nvec, int type); void arch_restore_msi_irqs(struct pci_dev *dev); void default_teardown_msi_irqs(struct pci_dev *dev); @@ -77,8 +73,6 @@ struct msi_chip { int (*setup_irq)(struct msi_chip *chip, struct pci_dev *dev, struct msi_desc *desc); void (*teardown_irq)(struct msi_chip *chip, unsigned int irq); - int (*check_device)(struct msi_chip *chip, struct pci_dev *dev, - int nvec, int type); }; #endif /* LINUX_MSI_H */ diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index 37ef6b194089..299d7d31fe53 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h @@ -153,7 +153,7 @@ struct cfi_ident { uint16_t MaxBufWriteSize; uint8_t NumEraseRegions; uint32_t EraseRegionInfo[0]; /* Not host ordered */ -} __attribute__((packed)); +} __packed; /* Extended Query Structure for both PRI and ALT */ @@ -161,7 +161,7 @@ struct cfi_extquery { uint8_t pri[3]; uint8_t MajorVersion; uint8_t MinorVersion; -} __attribute__((packed)); +} __packed; /* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */ @@ -180,7 +180,7 @@ struct cfi_pri_intelext { uint8_t FactProtRegSize; uint8_t UserProtRegSize; uint8_t extra[0]; -} __attribute__((packed)); +} __packed; struct cfi_intelext_otpinfo { uint32_t ProtRegAddr; @@ -188,7 +188,7 @@ struct cfi_intelext_otpinfo { uint8_t FactProtRegSize; uint16_t UserGroups; uint8_t UserProtRegSize; -} __attribute__((packed)); +} __packed; struct cfi_intelext_blockinfo { uint16_t NumIdentBlocks; @@ -196,7 +196,7 @@ struct cfi_intelext_blockinfo { uint16_t MinBlockEraseCycles; uint8_t BitsPerCell; uint8_t BlockCap; -} __attribute__((packed)); +} __packed; struct cfi_intelext_regioninfo { uint16_t NumIdentPartitions; @@ -205,7 +205,7 @@ struct cfi_intelext_regioninfo { uint8_t NumOpAllowedSimEraMode; uint8_t NumBlockTypes; struct cfi_intelext_blockinfo BlockTypes[1]; -} __attribute__((packed)); +} __packed; struct cfi_intelext_programming_regioninfo { uint8_t ProgRegShift; @@ -214,7 +214,7 @@ struct cfi_intelext_programming_regioninfo { uint8_t Reserved2; uint8_t ControlInvalid; uint8_t Reserved3; -} __attribute__((packed)); +} __packed; /* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */ @@ -233,7 +233,7 @@ struct cfi_pri_amdstd { uint8_t VppMin; uint8_t VppMax; uint8_t TopBottom; -} __attribute__((packed)); +} __packed; /* Vendor-Specific PRI for Atmel chips (command set 0x0002) */ @@ -245,18 +245,18 @@ struct cfi_pri_atmel { uint8_t BottomBoot; uint8_t BurstMode; uint8_t PageMode; -} __attribute__((packed)); +} __packed; struct cfi_pri_query { uint8_t NumFields; uint32_t ProtField[1]; /* Not host ordered */ -} __attribute__((packed)); +} __packed; struct cfi_bri_query { uint8_t PageModeReadCap; uint8_t NumFields; uint32_t ConfField[1]; /* Not host ordered */ -} __attribute__((packed)); +} __packed; #define P_ID_NONE 0x0000 #define P_ID_INTEL_EXT 0x0001 diff --git a/include/linux/mtd/nand.h 
b/include/linux/mtd/nand.h index c300db3ae285..e4d451e4600b 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -587,6 +587,11 @@ struct nand_buffers { * @ecc_step_ds: [INTERN] ECC step required by the @ecc_strength_ds, * also from the datasheet. It is the recommended ECC step * size, if known; if unknown, set to zero. + * @onfi_timing_mode_default: [INTERN] default ONFI timing mode. This field is + * either deduced from the datasheet if the NAND + * chip is not ONFI compliant or set to 0 if it is + * (an ONFI chip is always configured in mode 0 + * after a NAND reset) * @numchips: [INTERN] number of physical chips * @chipsize: [INTERN] the size of one chip for multichip arrays * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1 @@ -671,6 +676,7 @@ struct nand_chip { uint8_t bits_per_cell; uint16_t ecc_strength_ds; uint16_t ecc_step_ds; + int onfi_timing_mode_default; int badblockpos; int badblockbits; @@ -766,12 +772,17 @@ struct nand_chip { * @options: stores various chip bit options * @id_len: The valid length of the @id. * @oobsize: OOB size + * @ecc: ECC correctability and step information from the datasheet. * @ecc.strength_ds: The ECC correctability from the datasheet, same as the * @ecc_strength_ds in nand_chip{}. * @ecc.step_ds: The ECC step required by the @ecc.strength_ds, same as the * @ecc_step_ds in nand_chip{}, also from the datasheet. * For example, the "4bit ECC for each 512Byte" can be set with * NAND_ECC_INFO(4, 512). + * @onfi_timing_mode_default: the default ONFI timing mode entered after a NAND + * reset. Should be deduced from timings described + * in the datasheet. + * */ struct nand_flash_dev { char *name; @@ -792,6 +803,7 @@ struct nand_flash_dev { uint16_t strength_ds; uint16_t step_ds; } ecc; + int onfi_timing_mode_default; }; /** diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 8d5535c58cc2..cc31498fc526 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -52,7 +52,7 @@ struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; -#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP) +#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER) struct task_struct *owner; #endif #ifdef CONFIG_MUTEX_SPIN_ON_OWNER @@ -133,7 +133,7 @@ static inline int mutex_is_locked(struct mutex *lock) /* * See kernel/locking/mutex.c for detailed documentation of these APIs. - * Also see Documentation/mutex-design.txt. + * Also see Documentation/locking/mutex-design.txt. */ #ifdef CONFIG_DEBUG_LOCK_ALLOC extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 838407aea705..c9bcf33efb47 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -57,6 +57,8 @@ struct device; struct phy_device; /* 802.11 specific */ struct wireless_dev; +/* 802.15.4 specific */ +struct wpan_dev; void netdev_set_default_ethtool_ops(struct net_device *dev, const struct ethtool_ops *ops); @@ -998,6 +1000,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * Callback to use for xmit over the accelerated station. This * is used in place of ndo_start_xmit on accelerated net * devices. + * bool (*ndo_gso_check) (struct sk_buff *skb, + * struct net_device *dev); + * Called by core transmit path to determine if device is capable of + * performing GSO on a packet. The device returns true if it is + * able to GSO the packet, false otherwise. 
If the return value is + * false the stack will do software GSO. */ struct net_device_ops { int (*ndo_init)(struct net_device *dev); @@ -1147,6 +1155,8 @@ struct net_device_ops { struct net_device *dev, void *priv); int (*ndo_get_lock_subclass)(struct net_device *dev); + bool (*ndo_gso_check) (struct sk_buff *skb, + struct net_device *dev); }; /** @@ -1564,6 +1574,7 @@ struct net_device { struct inet6_dev __rcu *ip6_ptr; void *ax25_ptr; struct wireless_dev *ieee80211_ptr; + struct wpan_dev *ieee802154_ptr; /* * Cache lines mostly used on receive path (including eth_type_trans()) @@ -3572,10 +3583,12 @@ static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features) (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); } -static inline bool netif_needs_gso(struct sk_buff *skb, +static inline bool netif_needs_gso(struct net_device *dev, struct sk_buff *skb, netdev_features_t features) { return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || + (dev->netdev_ops->ndo_gso_check && + !dev->netdev_ops->ndo_gso_check(skb, dev)) || unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && (skb->ip_summed != CHECKSUM_UNNECESSARY))); } diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 026b0c042c40..356acc2846fd 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -487,6 +487,9 @@ enum { NFSPROC4_CLNT_GETDEVICELIST, NFSPROC4_CLNT_BIND_CONN_TO_SESSION, NFSPROC4_CLNT_DESTROY_CLIENTID, + + /* nfs42 */ + NFSPROC4_CLNT_SEEK, }; /* nfs41 types */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 28d649054d5f..c72d1ad41ad4 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -448,10 +448,10 @@ static inline struct rpc_cred *nfs_file_cred(struct file *file) extern ssize_t nfs_direct_IO(int, struct kiocb *, struct iov_iter *, loff_t); extern ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter, - loff_t pos, bool uio); + loff_t pos); extern ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter, - loff_t pos, bool uio); + loff_t pos); /* * linux/fs/nfs/dir.c diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 922be2e050f5..a32ba0d7a98f 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -230,5 +230,6 @@ struct nfs_server { #define NFS_CAP_STATEID_NFSV41 (1U << 16) #define NFS_CAP_ATOMIC_OPEN_V1 (1U << 17) #define NFS_CAP_SECURITY_LABEL (1U << 18) +#define NFS_CAP_SEEK (1U << 19) #endif diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 6951c7d9097d..983876f24aed 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1231,6 +1231,25 @@ struct pnfs_ds_commit_info { #endif /* CONFIG_NFS_V4_1 */ +#ifdef CONFIG_NFS_V4_2 +struct nfs42_seek_args { + struct nfs4_sequence_args seq_args; + + struct nfs_fh *sa_fh; + nfs4_stateid sa_stateid; + u64 sa_offset; + u32 sa_what; +}; + +struct nfs42_seek_res { + struct nfs4_sequence_res seq_res; + unsigned int status; + + u32 sr_eof; + u64 sr_offset; +}; +#endif + struct nfs_page; #define NFS_PAGEVEC_SIZE (8U) diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h index 20163b9a0eae..167342c2ce6b 100644 --- a/include/linux/nl802154.h +++ b/include/linux/nl802154.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * */ #ifndef NL802154_H diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 1d2a6ab6b8bb..9b2022ab4d85 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -24,6 +24,19 @@ static inline void touch_nmi_watchdog(void) } #endif +#if defined(CONFIG_HARDLOCKUP_DETECTOR) +extern void watchdog_enable_hardlockup_detector(bool val); +extern bool watchdog_hardlockup_detector_is_enabled(void); +#else +static inline void watchdog_enable_hardlockup_detector(bool val) +{ +} +static inline bool watchdog_hardlockup_detector_is_enabled(void) +{ + return true; +} +#endif + /* * Create trigger_all_cpu_backtrace() out of the arch-provided * base function. Return whether such support was available, diff --git a/include/linux/of.h b/include/linux/of.h index 6c4363b8ddc3..6545e7aec7bb 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -863,4 +863,7 @@ static inline int of_changeset_update_property(struct of_changeset *ocs, } #endif +/* CONFIG_OF_RESOLVE api */ +extern int of_resolve_phandles(struct device_node *tree); + #endif /* _LINUX_OF_H */ diff --git a/include/linux/of_address.h b/include/linux/of_address.h index fb7b7221e063..8cb14eb393d6 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -23,17 +23,6 @@ struct of_pci_range { #define for_each_of_pci_range(parser, range) \ for (; of_pci_range_parser_one(parser, range);) -static inline void of_pci_range_to_resource(struct of_pci_range *range, - struct device_node *np, - struct resource *res) -{ - res->flags = range->flags; - res->start = range->cpu_addr; - res->end = range->cpu_addr + range->size - 1; - res->parent = res->child = res->sibling = NULL; - res->name = np->full_name; -} - /* Translate a DMA address from device space to CPU space */ extern u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr); @@ -55,7 +44,9 @@ extern void __iomem *of_iomap(struct device_node *device, int index); extern const __be32 *of_get_address(struct device_node *dev, int index, u64 *size, unsigned int *flags); +extern int pci_register_io_range(phys_addr_t addr, resource_size_t size); extern unsigned long pci_address_to_pio(phys_addr_t addr); +extern phys_addr_t pci_pio_to_address(unsigned long pio); extern int of_pci_range_parser_init(struct of_pci_range_parser *parser, struct device_node *node); @@ -80,6 +71,11 @@ static inline const __be32 *of_get_address(struct device_node *dev, int index, return NULL; } +static inline phys_addr_t pci_pio_to_address(unsigned long pio) +{ + return 0; +} + static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser, struct device_node *node) { @@ -138,6 +134,9 @@ extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size, unsigned int *flags); extern int of_pci_address_to_resource(struct device_node *dev, int bar, struct resource *r); +extern int of_pci_range_to_resource(struct of_pci_range *range, + struct device_node *np, + struct resource *res); #else /* CONFIG_OF_ADDRESS && CONFIG_PCI */ static inline int of_pci_address_to_resource(struct device_node *dev, int bar, struct resource *r) @@ -150,6 +149,12 @@ static inline const __be32 *of_get_pci_address(struct device_node *dev, { return NULL; } +static inline int of_pci_range_to_resource(struct of_pci_range *range, + struct device_node *np, + struct 
resource *res) +{ + return -ENOSYS; +} #endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */ #endif /* __OF_ADDRESS_H */ diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index dde3a4a0fa5d..1fd207e7a847 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h @@ -15,6 +15,7 @@ struct device_node *of_pci_find_child_device(struct device_node *parent, int of_pci_get_devfn(struct device_node *np); int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); int of_pci_parse_bus_range(struct device_node *node, struct resource *res); +int of_get_pci_domain_nr(struct device_node *node); #else static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq) { @@ -43,6 +44,18 @@ of_pci_parse_bus_range(struct device_node *node, struct resource *res) { return -EINVAL; } + +static inline int +of_get_pci_domain_nr(struct device_node *node) +{ + return -1; +} +#endif + +#if defined(CONFIG_OF_ADDRESS) +int of_pci_get_host_bridge_resources(struct device_node *dev, + unsigned char busno, unsigned char bus_max, + struct list_head *resources, resource_size_t *io_base); #endif #if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 19191d39c4f3..7ea069cd3257 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -24,8 +24,7 @@ enum mapping_flags { AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */ AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */ AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */ - AS_BALLOON_MAP = __GFP_BITS_SHIFT + 4, /* balloon page special map */ - AS_EXITING = __GFP_BITS_SHIFT + 5, /* final truncate in progress */ + AS_EXITING = __GFP_BITS_SHIFT + 4, /* final truncate in progress */ }; static inline void mapping_set_error(struct address_space *mapping, int error) @@ -55,21 +54,6 @@ static inline int mapping_unevictable(struct address_space *mapping) return !!mapping; } -static inline void mapping_set_balloon(struct address_space *mapping) -{ - set_bit(AS_BALLOON_MAP, &mapping->flags); -} - -static inline void mapping_clear_balloon(struct address_space *mapping) -{ - clear_bit(AS_BALLOON_MAP, &mapping->flags); -} - -static inline int mapping_balloon(struct address_space *mapping) -{ - return mapping && test_bit(AS_BALLOON_MAP, &mapping->flags); -} - static inline void mapping_set_exiting(struct address_space *mapping) { set_bit(AS_EXITING, &mapping->flags); diff --git a/include/linux/pci.h b/include/linux/pci.h index 96453f9bc8ba..5be8db45e368 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -45,7 +45,7 @@ * In the interest of not exposing interfaces to user-space unnecessarily, * the following kernel-only defines are being added here. 
*/ -#define PCI_DEVID(bus, devfn) ((((u16)bus) << 8) | devfn) +#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) /* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */ #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff) @@ -457,6 +457,9 @@ struct pci_bus { unsigned char primary; /* number of primary bridge */ unsigned char max_bus_speed; /* enum pci_bus_speed */ unsigned char cur_bus_speed; /* enum pci_bus_speed */ +#ifdef CONFIG_PCI_DOMAINS_GENERIC + int domain_nr; +#endif char name[48]; @@ -1103,6 +1106,9 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus, resource_size_t), void *alignf_data); + +int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr); + static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar) { struct pci_bus_region region; @@ -1288,12 +1294,32 @@ void pci_cfg_access_unlock(struct pci_dev *dev); */ #ifdef CONFIG_PCI_DOMAINS extern int pci_domains_supported; +int pci_get_new_domain_nr(void); #else enum { pci_domains_supported = 0 }; static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } static inline int pci_proc_domain(struct pci_bus *bus) { return 0; } +static inline int pci_get_new_domain_nr(void) { return -ENOSYS; } #endif /* CONFIG_PCI_DOMAINS */ +/* + * Generic implementation for PCI domain support. If your + * architecture does not need custom management of PCI + * domains then this implementation will be used + */ +#ifdef CONFIG_PCI_DOMAINS_GENERIC +static inline int pci_domain_nr(struct pci_bus *bus) +{ + return bus->domain_nr; +} +void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent); +#else +static inline void pci_bus_assign_domain_nr(struct pci_bus *bus, + struct device *parent) +{ +} +#endif + /* some architectures require additional setup to direct VGA traffic */ typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags); @@ -1402,6 +1428,7 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; } +static inline int pci_get_new_domain_nr(void) { return -ENOSYS; } #define dev_is_pci(d) (false) #define dev_is_pf(d) (false) @@ -1563,16 +1590,11 @@ enum pci_fixup_pass { #ifdef CONFIG_PCI_QUIRKS void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); -struct pci_dev *pci_get_dma_source(struct pci_dev *dev); int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags); void pci_dev_specific_enable_acs(struct pci_dev *dev); #else static inline void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) { } -static inline struct pci_dev *pci_get_dma_source(struct pci_dev *dev) -{ - return pci_dev_get(dev); -} static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags) { @@ -1707,7 +1729,7 @@ bool pci_acs_path_enabled(struct pci_dev *start, struct pci_dev *end, u16 acs_flags); #define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */ -#define PCI_VPD_LRDT_ID(x) (x | PCI_VPD_LRDT) +#define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT) /* Large Resource Data Type Tag Item Names */ #define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */ @@ -1834,15 +1856,17 @@ int pci_for_each_dma_alias(struct pci_dev *pdev, int (*fn)(struct pci_dev *pdev, u16 alias, void *data), void *data); -/** - * pci_find_upstream_pcie_bridge - find upstream PCIe-to-PCI bridge of a device - * @pdev: the PCI device - * - * if the device is PCIE, return 
NULL - * if the device isn't connected to a PCIe bridge (that is its parent is a - * legacy PCI bridge and the bridge is directly connected to bus 0), return its - * parent - */ -struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev); - +/* helper functions for operation of device flag */ +static inline void pci_set_dev_assigned(struct pci_dev *pdev) +{ + pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED; +} +static inline void pci_clear_dev_assigned(struct pci_dev *pdev) +{ + pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; +} +static inline bool pci_is_dev_assigned(struct pci_dev *pdev) +{ + return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED; +} #endif /* LINUX_PCI_H */ diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index 5f2e559af6b0..2706ee9a4327 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -187,6 +187,4 @@ static inline int pci_get_hp_params(struct pci_dev *dev, return -ENODEV; } #endif - -void pci_configure_slot(struct pci_dev *dev); #endif diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 6ed0bb73a864..1fa99a301817 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2245,6 +2245,8 @@ #define PCI_VENDOR_ID_MORETON 0x15aa #define PCI_DEVICE_ID_RASTEL_2PORT 0x2000 +#define PCI_VENDOR_ID_VMWARE 0x15ad + #define PCI_VENDOR_ID_ZOLTRIX 0x15b0 #define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0 @@ -2536,6 +2538,7 @@ #define PCI_DEVICE_ID_INTEL_EESSC 0x0008 #define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100 #define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154 +#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150 #define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00 #define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320 #define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321 @@ -2557,6 +2560,7 @@ #define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823 #define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824 #define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F +#define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095E #define PCI_DEVICE_ID_INTEL_I960 0x0960 #define PCI_DEVICE_ID_INTEL_I960RM 0x0962 #define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60 @@ -2818,7 +2822,22 @@ #define PCI_DEVICE_ID_INTEL_UNC_R2PCIE 0x3c43 #define PCI_DEVICE_ID_INTEL_UNC_R3QPI0 0x3c44 #define PCI_DEVICE_ID_INTEL_UNC_R3QPI1 0x3c45 +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS 0x3c71 /* 15.1 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0 0x3c72 /* 16.2 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1 0x3c73 /* 16.3 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR2 0x3c76 /* 16.6 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR3 0x3c77 /* 16.7 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0 0x3ca0 /* 14.0 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA 0x3ca8 /* 15.0 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0 0x3caa /* 15.2 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1 0x3cab /* 15.3 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2 0x3cac /* 15.4 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3 0x3cad /* 15.5 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO 0x3cb8 /* 17.0 */ #define PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX 0x3ce0 +#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */ +#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */ #define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f #define PCI_DEVICE_ID_INTEL_5100_16 0x65f0 #define PCI_DEVICE_ID_INTEL_5100_19 0x65f3 @@ -2860,6 +2879,7 @@ #define PCI_DEVICE_ID_INTEL_82372FB_1 0x7601 #define PCI_DEVICE_ID_INTEL_SCH_LPC 0x8119 #define PCI_DEVICE_ID_INTEL_SCH_IDE 0x811a +#define 
PCI_DEVICE_ID_INTEL_E6XX_CU 0x8183 #define PCI_DEVICE_ID_INTEL_ITC_LPC 0x8186 #define PCI_DEVICE_ID_INTEL_82454GX 0x84c4 #define PCI_DEVICE_ID_INTEL_82450GX 0x84c5 diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index cfd56046ecec..420032d41d27 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -257,9 +257,6 @@ do { \ #define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var))) #define __get_cpu_var(var) (*this_cpu_ptr(&(var))) -/* keep until we have removed all uses of __this_cpu_ptr */ -#define __this_cpu_ptr(ptr) raw_cpu_ptr(ptr) - /* * Must be an lvalue. Since @var must be a simple identifier, * we force a syntax error here if it isn't. diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 68a64f11ce02..d5c89e0dd0e6 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -13,7 +13,7 @@ * * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less * than an atomic_t - this is because of the way shutdown works, see - * percpu_ref_kill()/PCPU_COUNT_BIAS. + * percpu_ref_kill()/PERCPU_COUNT_BIAS. * * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill() @@ -49,29 +49,60 @@ #include <linux/kernel.h> #include <linux/percpu.h> #include <linux/rcupdate.h> +#include <linux/gfp.h> struct percpu_ref; typedef void (percpu_ref_func_t)(struct percpu_ref *); +/* flags set in the lower bits of percpu_ref->percpu_count_ptr */ +enum { + __PERCPU_REF_ATOMIC = 1LU << 0, /* operating in atomic mode */ + __PERCPU_REF_DEAD = 1LU << 1, /* (being) killed */ + __PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD, + + __PERCPU_REF_FLAG_BITS = 2, +}; + +/* @flags for percpu_ref_init() */ +enum { + /* + * Start w/ ref == 1 in atomic mode. Can be switched to percpu + * operation using percpu_ref_switch_to_percpu(). If initialized + * with this flag, the ref will stay in atomic mode until + * percpu_ref_switch_to_percpu() is invoked on it. + */ + PERCPU_REF_INIT_ATOMIC = 1 << 0, + + /* + * Start dead w/ ref == 0 in atomic mode. Must be revived with + * percpu_ref_reinit() before used. Implies INIT_ATOMIC. + */ + PERCPU_REF_INIT_DEAD = 1 << 1, +}; + struct percpu_ref { - atomic_t count; + atomic_long_t count; /* * The low bit of the pointer indicates whether the ref is in percpu * mode; if set, then get/put will manipulate the atomic_t. */ - unsigned long pcpu_count_ptr; + unsigned long percpu_count_ptr; percpu_ref_func_t *release; - percpu_ref_func_t *confirm_kill; + percpu_ref_func_t *confirm_switch; + bool force_atomic:1; struct rcu_head rcu; }; int __must_check percpu_ref_init(struct percpu_ref *ref, - percpu_ref_func_t *release); -void percpu_ref_reinit(struct percpu_ref *ref); + percpu_ref_func_t *release, unsigned int flags, + gfp_t gfp); void percpu_ref_exit(struct percpu_ref *ref); +void percpu_ref_switch_to_atomic(struct percpu_ref *ref, + percpu_ref_func_t *confirm_switch); +void percpu_ref_switch_to_percpu(struct percpu_ref *ref); void percpu_ref_kill_and_confirm(struct percpu_ref *ref, percpu_ref_func_t *confirm_kill); -void __percpu_ref_kill_expedited(struct percpu_ref *ref); +void percpu_ref_reinit(struct percpu_ref *ref); /** * percpu_ref_kill - drop the initial ref @@ -88,26 +119,24 @@ static inline void percpu_ref_kill(struct percpu_ref *ref) return percpu_ref_kill_and_confirm(ref, NULL); } -#define PCPU_REF_DEAD 1 - /* * Internal helper. 
Don't use outside percpu-refcount proper. The * function doesn't return the pointer and let the caller test it for NULL * because doing so forces the compiler to generate two conditional - * branches as it can't assume that @ref->pcpu_count is not NULL. + * branches as it can't assume that @ref->percpu_count is not NULL. */ -static inline bool __pcpu_ref_alive(struct percpu_ref *ref, - unsigned __percpu **pcpu_countp) +static inline bool __ref_is_percpu(struct percpu_ref *ref, + unsigned long __percpu **percpu_countp) { - unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr); + unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr); /* paired with smp_store_release() in percpu_ref_reinit() */ smp_read_barrier_depends(); - if (unlikely(pcpu_ptr & PCPU_REF_DEAD)) + if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC)) return false; - *pcpu_countp = (unsigned __percpu *)pcpu_ptr; + *percpu_countp = (unsigned long __percpu *)percpu_ptr; return true; } @@ -115,18 +144,20 @@ static inline bool __pcpu_ref_alive(struct percpu_ref *ref, * percpu_ref_get - increment a percpu refcount * @ref: percpu_ref to get * - * Analagous to atomic_inc(). - */ + * Analagous to atomic_long_inc(). + * + * This function is safe to call as long as @ref is between init and exit. + */ static inline void percpu_ref_get(struct percpu_ref *ref) { - unsigned __percpu *pcpu_count; + unsigned long __percpu *percpu_count; rcu_read_lock_sched(); - if (__pcpu_ref_alive(ref, &pcpu_count)) - this_cpu_inc(*pcpu_count); + if (__ref_is_percpu(ref, &percpu_count)) + this_cpu_inc(*percpu_count); else - atomic_inc(&ref->count); + atomic_long_inc(&ref->count); rcu_read_unlock_sched(); } @@ -138,20 +169,20 @@ static inline void percpu_ref_get(struct percpu_ref *ref) * Increment a percpu refcount unless its count already reached zero. * Returns %true on success; %false on failure. * - * The caller is responsible for ensuring that @ref stays accessible. + * This function is safe to call as long as @ref is between init and exit. */ static inline bool percpu_ref_tryget(struct percpu_ref *ref) { - unsigned __percpu *pcpu_count; - int ret = false; + unsigned long __percpu *percpu_count; + int ret; rcu_read_lock_sched(); - if (__pcpu_ref_alive(ref, &pcpu_count)) { - this_cpu_inc(*pcpu_count); + if (__ref_is_percpu(ref, &percpu_count)) { + this_cpu_inc(*percpu_count); ret = true; } else { - ret = atomic_inc_not_zero(&ref->count); + ret = atomic_long_inc_not_zero(&ref->count); } rcu_read_unlock_sched(); @@ -166,23 +197,26 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref) * Increment a percpu refcount unless it has already been killed. Returns * %true on success; %false on failure. * - * Completion of percpu_ref_kill() in itself doesn't guarantee that tryget - * will fail. For such guarantee, percpu_ref_kill_and_confirm() should be - * used. After the confirm_kill callback is invoked, it's guaranteed that - * no new reference will be given out by percpu_ref_tryget(). + * Completion of percpu_ref_kill() in itself doesn't guarantee that this + * function will fail. For such guarantee, percpu_ref_kill_and_confirm() + * should be used. After the confirm_kill callback is invoked, it's + * guaranteed that no new reference will be given out by + * percpu_ref_tryget_live(). * - * The caller is responsible for ensuring that @ref stays accessible. + * This function is safe to call as long as @ref is between init and exit. 
*/ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) { - unsigned __percpu *pcpu_count; + unsigned long __percpu *percpu_count; int ret = false; rcu_read_lock_sched(); - if (__pcpu_ref_alive(ref, &pcpu_count)) { - this_cpu_inc(*pcpu_count); + if (__ref_is_percpu(ref, &percpu_count)) { + this_cpu_inc(*percpu_count); ret = true; + } else if (!(ACCESS_ONCE(ref->percpu_count_ptr) & __PERCPU_REF_DEAD)) { + ret = atomic_long_inc_not_zero(&ref->count); } rcu_read_unlock_sched(); @@ -196,16 +230,18 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) * * Decrement the refcount, and if 0, call the release function (which was passed * to percpu_ref_init()) + * + * This function is safe to call as long as @ref is between init and exit. */ static inline void percpu_ref_put(struct percpu_ref *ref) { - unsigned __percpu *pcpu_count; + unsigned long __percpu *percpu_count; rcu_read_lock_sched(); - if (__pcpu_ref_alive(ref, &pcpu_count)) - this_cpu_dec(*pcpu_count); - else if (unlikely(atomic_dec_and_test(&ref->count))) + if (__ref_is_percpu(ref, &percpu_count)) + this_cpu_dec(*percpu_count); + else if (unlikely(atomic_long_dec_and_test(&ref->count))) ref->release(ref); rcu_read_unlock_sched(); @@ -216,14 +252,16 @@ static inline void percpu_ref_put(struct percpu_ref *ref) * @ref: percpu_ref to test * * Returns %true if @ref reached zero. + * + * This function is safe to call as long as @ref is between init and exit. */ static inline bool percpu_ref_is_zero(struct percpu_ref *ref) { - unsigned __percpu *pcpu_count; + unsigned long __percpu *percpu_count; - if (__pcpu_ref_alive(ref, &pcpu_count)) + if (__ref_is_percpu(ref, &percpu_count)) return false; - return !atomic_read(&ref->count); + return !atomic_long_read(&ref->count); } #endif diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 6f61b61b7996..a3aa63e47637 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -48,9 +48,9 @@ * intelligent way to determine this would be nice. 
*/ #if BITS_PER_LONG > 32 -#define PERCPU_DYNAMIC_RESERVE (20 << 10) +#define PERCPU_DYNAMIC_RESERVE (28 << 10) #else -#define PERCPU_DYNAMIC_RESERVE (12 << 10) +#define PERCPU_DYNAMIC_RESERVE (20 << 10) #endif extern void *pcpu_base_addr; @@ -122,11 +122,16 @@ extern void __init setup_per_cpu_areas(void); #endif extern void __init percpu_init_late(void); +extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp); extern void __percpu *__alloc_percpu(size_t size, size_t align); extern void free_percpu(void __percpu *__pdata); extern phys_addr_t per_cpu_ptr_to_phys(void *addr); -#define alloc_percpu(type) \ - (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type)) +#define alloc_percpu_gfp(type, gfp) \ + (typeof(type) __percpu *)__alloc_percpu_gfp(sizeof(type), \ + __alignof__(type), gfp) +#define alloc_percpu(type) \ + (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \ + __alignof__(type)) #endif /* __LINUX_PERCPU_H */ diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index d5dd4657c8d6..50e50095c8d1 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -12,6 +12,7 @@ #include <linux/threads.h> #include <linux/percpu.h> #include <linux/types.h> +#include <linux/gfp.h> #ifdef CONFIG_SMP @@ -26,14 +27,14 @@ struct percpu_counter { extern int percpu_counter_batch; -int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, +int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp, struct lock_class_key *key); -#define percpu_counter_init(fbc, value) \ +#define percpu_counter_init(fbc, value, gfp) \ ({ \ static struct lock_class_key __key; \ \ - __percpu_counter_init(fbc, value, &__key); \ + __percpu_counter_init(fbc, value, gfp, &__key); \ }) void percpu_counter_destroy(struct percpu_counter *fbc); @@ -89,7 +90,8 @@ struct percpu_counter { s64 count; }; -static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount) +static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount, + gfp_t gfp) { fbc->count = amount; return 0; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 707617a8c0f6..893a0d07986f 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -52,6 +52,7 @@ struct perf_guest_info_callbacks { #include <linux/atomic.h> #include <linux/sysfs.h> #include <linux/perf_regs.h> +#include <linux/workqueue.h> #include <asm/local.h> struct perf_callchain_entry { @@ -268,6 +269,7 @@ struct pmu { * enum perf_event_active_state - the states of a event */ enum perf_event_active_state { + PERF_EVENT_STATE_EXIT = -3, PERF_EVENT_STATE_ERROR = -2, PERF_EVENT_STATE_OFF = -1, PERF_EVENT_STATE_INACTIVE = 0, @@ -507,6 +509,9 @@ struct perf_event_context { int nr_cgroups; /* cgroup evts */ int nr_branch_stack; /* branch_stack evt */ struct rcu_head rcu_head; + + struct delayed_work orphans_remove; + bool orphans_remove_sched; }; /* @@ -604,6 +609,13 @@ struct perf_sample_data { u64 txn; }; +/* default value for data source */ +#define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\ + PERF_MEM_S(LVL, NA) |\ + PERF_MEM_S(SNOOP, NA) |\ + PERF_MEM_S(LOCK, NA) |\ + PERF_MEM_S(TLB, NA)) + static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr, u64 period) { @@ -616,7 +628,7 @@ static inline void perf_sample_data_init(struct perf_sample_data *data, data->regs_user.regs = NULL; data->stack_user_size = 0; data->weight = 0; - data->data_src.val = 0; + data->data_src.val = PERF_MEM_NA; data->txn = 0; } 
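
As a rough illustration of the reworked percpu interfaces in the hunks above (percpu_ref_init() now taking mode flags and an explicit gfp_t, percpu_ref_switch_to_percpu(), and percpu_counter_init() gaining a gfp_t), a caller after this series might look like the minimal sketch below. The names my_object, my_release and my_object_create are hypothetical, chosen only for illustration and not part of these patches.

#include <linux/kernel.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu_counter.h>
#include <linux/gfp.h>
#include <linux/slab.h>

struct my_object {
	struct percpu_ref ref;
	struct percpu_counter events;
};

static void my_release(struct percpu_ref *ref)
{
	struct my_object *obj = container_of(ref, struct my_object, ref);

	kfree(obj);
}

static struct my_object *my_object_create(void)
{
	struct my_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	/* New four-argument form: flags pick the starting mode, gfp is explicit. */
	if (percpu_ref_init(&obj->ref, my_release, PERCPU_REF_INIT_ATOMIC,
			    GFP_KERNEL)) {
		kfree(obj);
		return NULL;
	}

	/* percpu_counter_init() now also takes a gfp_t. */
	if (percpu_counter_init(&obj->events, 0, GFP_KERNEL)) {
		percpu_ref_exit(&obj->ref);
		kfree(obj);
		return NULL;
	}

	/* Drop back to percpu operation once the object is fully set up. */
	percpu_ref_switch_to_percpu(&obj->ref);
	return obj;
}

Starting in atomic mode and switching to percpu afterwards is just one possible use of PERCPU_REF_INIT_ATOMIC; callers that never need atomic-mode setup can pass 0 for the flags and get the old percpu-from-the-start behaviour.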
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h new file mode 100644 index 000000000000..d8155c005242 --- /dev/null +++ b/include/linux/platform_data/dma-dw.h @@ -0,0 +1,59 @@ +/* + * Driver for the Synopsys DesignWare DMA Controller + * + * Copyright (C) 2007 Atmel Corporation + * Copyright (C) 2010-2011 ST Microelectronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _PLATFORM_DATA_DMA_DW_H +#define _PLATFORM_DATA_DMA_DW_H + +#include <linux/device.h> + +/** + * struct dw_dma_slave - Controller-specific information about a slave + * + * @dma_dev: required DMA master device. Depricated. + * @src_id: src request line + * @dst_id: dst request line + * @src_master: src master for transfers on allocated channel. + * @dst_master: dest master for transfers on allocated channel. + */ +struct dw_dma_slave { + struct device *dma_dev; + u8 src_id; + u8 dst_id; + u8 src_master; + u8 dst_master; +}; + +/** + * struct dw_dma_platform_data - Controller configuration parameters + * @nr_channels: Number of channels supported by hardware (max 8) + * @is_private: The device channels should be marked as private and not for + * by the general purpose DMA channel allocator. + * @chan_allocation_order: Allocate channels starting from 0 or 7 + * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. + * @block_size: Maximum block size supported by the controller + * @nr_masters: Number of AHB masters supported by the controller + * @data_width: Maximum data width supported by hardware per AHB master + * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits) + */ +struct dw_dma_platform_data { + unsigned int nr_channels; + bool is_private; +#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ +#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ + unsigned char chan_allocation_order; +#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ +#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ + unsigned char chan_priority; + unsigned short block_size; + unsigned char nr_masters; + unsigned char data_width[4]; +}; + +#endif /* _PLATFORM_DATA_DMA_DW_H */ diff --git a/include/linux/platform_data/elm.h b/include/linux/platform_data/elm.h index 780d1e97f620..b8686c00f15f 100644 --- a/include/linux/platform_data/elm.h +++ b/include/linux/platform_data/elm.h @@ -42,8 +42,24 @@ struct elm_errorvec { int error_loc[16]; }; +#if IS_ENABLED(CONFIG_MTD_NAND_OMAP_BCH) void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc, struct elm_errorvec *err_vec); int elm_config(struct device *dev, enum bch_ecc bch_type, int ecc_steps, int ecc_step_size, int ecc_syndrome_size); +#else +static inline void +elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc, + struct elm_errorvec *err_vec) +{ +} + +static inline int elm_config(struct device *dev, enum bch_ecc bch_type, + int ecc_steps, int ecc_step_size, + int ecc_syndrome_size) +{ + return -ENOSYS; +} +#endif /* CONFIG_MTD_NAND_ECC_BCH */ + #endif /* __ELM_H */ diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h new file mode 100644 index 000000000000..28702c849af1 --- /dev/null +++ b/include/linux/platform_data/gpio-dwapb.h @@ -0,0 +1,32 @@ +/* + * Copyright(c) 2014 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef GPIO_DW_APB_H +#define GPIO_DW_APB_H + +struct dwapb_port_property { + struct device_node *node; + const char *name; + unsigned int idx; + unsigned int ngpio; + unsigned int gpio_base; + unsigned int irq; + bool irq_shared; +}; + +struct dwapb_platform_data { + struct dwapb_port_property *properties; + unsigned int nports; +}; + +#endif diff --git a/include/linux/platform_data/i2c-designware.h b/include/linux/platform_data/i2c-designware.h new file mode 100644 index 000000000000..7a61fb27c25b --- /dev/null +++ b/include/linux/platform_data/i2c-designware.h @@ -0,0 +1,21 @@ +/* + * Copyright(c) 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef I2C_DESIGNWARE_H +#define I2C_DESIGNWARE_H + +struct dw_i2c_platform_data { + unsigned int i2c_scl_freq; +}; + +#endif diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h index 16ec262dfcc8..090bbab0130a 100644 --- a/include/linux/platform_data/mtd-nand-omap2.h +++ b/include/linux/platform_data/mtd-nand-omap2.h @@ -71,6 +71,7 @@ struct omap_nand_platform_data { struct mtd_partition *parts; int nr_parts; bool dev_ready; + bool flash_bbt; enum nand_io xfer_type; int devsize; enum omap_ecc ecc_opt; diff --git a/include/linux/platform_data/rcar-du.h b/include/linux/platform_data/rcar-du.h index 1a2e9901a22e..a5f045e1d8fe 100644 --- a/include/linux/platform_data/rcar-du.h +++ b/include/linux/platform_data/rcar-du.h @@ -14,7 +14,7 @@ #ifndef __RCAR_DU_H__ #define __RCAR_DU_H__ -#include <drm/drm_mode.h> +#include <video/videomode.h> enum rcar_du_output { RCAR_DU_OUTPUT_DPAD0, @@ -35,7 +35,7 @@ enum rcar_du_encoder_type { struct rcar_du_panel_data { unsigned int width_mm; /* Panel width in mm */ unsigned int height_mm; /* Panel height in mm */ - struct drm_mode_modeinfo mode; + struct videomode mode; }; struct rcar_du_connector_lvds_data { diff --git a/include/linux/pm.h b/include/linux/pm.h index 72c0fe098a27..383fd68aaee1 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -619,6 +619,7 @@ extern int dev_pm_put_subsys_data(struct device *dev); */ struct dev_pm_domain { struct dev_pm_ops ops; + void (*detach)(struct device *dev, bool power_off); }; /* @@ -679,12 +680,16 @@ struct dev_pm_domain { extern void device_pm_lock(void); extern void dpm_resume_start(pm_message_t state); extern void dpm_resume_end(pm_message_t state); +extern void dpm_resume_noirq(pm_message_t state); +extern void dpm_resume_early(pm_message_t state); extern void dpm_resume(pm_message_t state); extern void dpm_complete(pm_message_t state); extern void device_pm_unlock(void); extern int dpm_suspend_end(pm_message_t 
state); extern int dpm_suspend_start(pm_message_t state); +extern int dpm_suspend_noirq(pm_message_t state); +extern int dpm_suspend_late(pm_message_t state); extern int dpm_suspend(pm_message_t state); extern int dpm_prepare(pm_message_t state); diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index ebc4c76ffb73..73e938b7e937 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -35,18 +35,10 @@ struct gpd_dev_ops { int (*stop)(struct device *dev); int (*save_state)(struct device *dev); int (*restore_state)(struct device *dev); - int (*suspend)(struct device *dev); - int (*suspend_late)(struct device *dev); - int (*resume_early)(struct device *dev); - int (*resume)(struct device *dev); - int (*freeze)(struct device *dev); - int (*freeze_late)(struct device *dev); - int (*thaw_early)(struct device *dev); - int (*thaw)(struct device *dev); bool (*active_wakeup)(struct device *dev); }; -struct gpd_cpu_data { +struct gpd_cpuidle_data { unsigned int saved_exit_latency; struct cpuidle_state *idle_state; }; @@ -71,7 +63,6 @@ struct generic_pm_domain { unsigned int suspended_count; /* System suspend device counter */ unsigned int prepared_count; /* Suspend counter of prepared devices */ bool suspend_power_off; /* Power status before system suspend */ - bool dev_irq_safe; /* Device callbacks are IRQ-safe */ int (*power_off)(struct generic_pm_domain *domain); s64 power_off_latency_ns; int (*power_on)(struct generic_pm_domain *domain); @@ -80,8 +71,9 @@ struct generic_pm_domain { s64 max_off_time_ns; /* Maximum allowed "suspended" time. */ bool max_off_time_changed; bool cached_power_down_ok; - struct device_node *of_node; /* Node in device tree */ - struct gpd_cpu_data *cpu_data; + struct gpd_cpuidle_data *cpuidle_data; + void (*attach_dev)(struct device *dev); + void (*detach_dev)(struct device *dev); }; static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) @@ -108,7 +100,6 @@ struct gpd_timing_data { struct generic_pm_domain_data { struct pm_domain_data base; - struct gpd_dev_ops ops; struct gpd_timing_data td; struct notifier_block nb; struct mutex lock; @@ -127,17 +118,11 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev) return to_gpd_data(dev->power.subsys_data->domain_data); } -extern struct dev_power_governor simple_qos_governor; - extern struct generic_pm_domain *dev_to_genpd(struct device *dev); extern int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, struct gpd_timing_data *td); -extern int __pm_genpd_of_add_device(struct device_node *genpd_node, - struct device *dev, - struct gpd_timing_data *td); - extern int __pm_genpd_name_add_device(const char *domain_name, struct device *dev, struct gpd_timing_data *td); @@ -151,10 +136,6 @@ extern int pm_genpd_add_subdomain_names(const char *master_name, const char *subdomain_name); extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *target); -extern int pm_genpd_add_callbacks(struct device *dev, - struct gpd_dev_ops *ops, - struct gpd_timing_data *td); -extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td); extern int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state); extern int pm_genpd_name_attach_cpuidle(const char *name, int state); extern int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd); @@ -165,8 +146,7 @@ extern void pm_genpd_init(struct generic_pm_domain *genpd, extern int pm_genpd_poweron(struct generic_pm_domain *genpd); 
extern int pm_genpd_name_poweron(const char *domain_name); -extern bool default_stop_ok(struct device *dev); - +extern struct dev_power_governor simple_qos_governor; extern struct dev_power_governor pm_domain_always_on_gov; #else @@ -184,12 +164,6 @@ static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd, { return -ENOSYS; } -static inline int __pm_genpd_of_add_device(struct device_node *genpd_node, - struct device *dev, - struct gpd_timing_data *td) -{ - return -ENOSYS; -} static inline int __pm_genpd_name_add_device(const char *domain_name, struct device *dev, struct gpd_timing_data *td) @@ -217,16 +191,6 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, { return -ENOSYS; } -static inline int pm_genpd_add_callbacks(struct device *dev, - struct gpd_dev_ops *ops, - struct gpd_timing_data *td) -{ - return -ENOSYS; -} -static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) -{ - return -ENOSYS; -} static inline int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st) { return -ENOSYS; @@ -255,10 +219,6 @@ static inline int pm_genpd_name_poweron(const char *domain_name) { return -ENOSYS; } -static inline bool default_stop_ok(struct device *dev) -{ - return false; -} #define simple_qos_governor NULL #define pm_domain_always_on_gov NULL #endif @@ -269,45 +229,87 @@ static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, return __pm_genpd_add_device(genpd, dev, NULL); } -static inline int pm_genpd_of_add_device(struct device_node *genpd_node, - struct device *dev) -{ - return __pm_genpd_of_add_device(genpd_node, dev, NULL); -} - static inline int pm_genpd_name_add_device(const char *domain_name, struct device *dev) { return __pm_genpd_name_add_device(domain_name, dev, NULL); } -static inline int pm_genpd_remove_callbacks(struct device *dev) -{ - return __pm_genpd_remove_callbacks(dev, true); -} - #ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME -extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd); extern void pm_genpd_poweroff_unused(void); #else -static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {} static inline void pm_genpd_poweroff_unused(void) {} #endif #ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP -extern void pm_genpd_syscore_switch(struct device *dev, bool suspend); +extern void pm_genpd_syscore_poweroff(struct device *dev); +extern void pm_genpd_syscore_poweron(struct device *dev); #else -static inline void pm_genpd_syscore_switch(struct device *dev, bool suspend) {} +static inline void pm_genpd_syscore_poweroff(struct device *dev) {} +static inline void pm_genpd_syscore_poweron(struct device *dev) {} #endif -static inline void pm_genpd_syscore_poweroff(struct device *dev) +/* OF PM domain providers */ +struct of_device_id; + +struct genpd_onecell_data { + struct generic_pm_domain **domains; + unsigned int num_domains; +}; + +typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args, + void *data); + +#ifdef CONFIG_PM_GENERIC_DOMAINS_OF +int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, + void *data); +void of_genpd_del_provider(struct device_node *np); + +struct generic_pm_domain *__of_genpd_xlate_simple( + struct of_phandle_args *genpdspec, + void *data); +struct generic_pm_domain *__of_genpd_xlate_onecell( + struct of_phandle_args *genpdspec, + void *data); + +int genpd_dev_pm_attach(struct device *dev); +#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */ +static inline int __of_genpd_add_provider(struct device_node 
*np, + genpd_xlate_t xlate, void *data) +{ + return 0; +} +static inline void of_genpd_del_provider(struct device_node *np) {} + +#define __of_genpd_xlate_simple NULL +#define __of_genpd_xlate_onecell NULL + +static inline int genpd_dev_pm_attach(struct device *dev) +{ + return -ENODEV; +} +#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ + +static inline int of_genpd_add_provider_simple(struct device_node *np, + struct generic_pm_domain *genpd) +{ + return __of_genpd_add_provider(np, __of_genpd_xlate_simple, genpd); +} +static inline int of_genpd_add_provider_onecell(struct device_node *np, + struct genpd_onecell_data *data) { - pm_genpd_syscore_switch(dev, true); + return __of_genpd_add_provider(np, __of_genpd_xlate_onecell, data); } -static inline void pm_genpd_syscore_poweron(struct device *dev) +#ifdef CONFIG_PM +extern int dev_pm_domain_attach(struct device *dev, bool power_on); +extern void dev_pm_domain_detach(struct device *dev, bool power_off); +#else +static inline int dev_pm_domain_attach(struct device *dev, bool power_on) { - pm_genpd_syscore_switch(dev, false); + return -ENODEV; } +static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {} +#endif #endif /* _LINUX_PM_DOMAIN_H */ diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index f3dea41dbcd2..3ed049673022 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -18,8 +18,6 @@ #include <linux/spinlock.h> #include <linux/notifier.h> -struct device; - /* * All voltages, currents, charges, energies, time and temperatures in uV, * µA, µAh, µWh, seconds and tenths of degree Celsius unless otherwise @@ -102,9 +100,11 @@ enum power_supply_property { POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_VOLTAGE_AVG, POWER_SUPPLY_PROP_VOLTAGE_OCV, + POWER_SUPPLY_PROP_VOLTAGE_BOOT, POWER_SUPPLY_PROP_CURRENT_MAX, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_CURRENT_AVG, + POWER_SUPPLY_PROP_CURRENT_BOOT, POWER_SUPPLY_PROP_POWER_NOW, POWER_SUPPLY_PROP_POWER_AVG, POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, @@ -146,6 +146,7 @@ enum power_supply_property { POWER_SUPPLY_PROP_TYPE, /* use power_supply.type instead */ POWER_SUPPLY_PROP_SCOPE, POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT, + POWER_SUPPLY_PROP_CALIBRATE, /* Properties of type `const char *' */ POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, @@ -172,6 +173,7 @@ union power_supply_propval { const char *strval; }; +struct device; struct device_node; struct power_supply { @@ -291,6 +293,7 @@ static inline bool power_supply_is_amp_property(enum power_supply_property psp) case POWER_SUPPLY_PROP_CURRENT_MAX: case POWER_SUPPLY_PROP_CURRENT_NOW: case POWER_SUPPLY_PROP_CURRENT_AVG: + case POWER_SUPPLY_PROP_CURRENT_BOOT: return 1; default: break; @@ -315,6 +318,7 @@ static inline bool power_supply_is_watt_property(enum power_supply_property psp) case POWER_SUPPLY_PROP_VOLTAGE_NOW: case POWER_SUPPLY_PROP_VOLTAGE_AVG: case POWER_SUPPLY_PROP_VOLTAGE_OCV: + case POWER_SUPPLY_PROP_VOLTAGE_BOOT: case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX: case POWER_SUPPLY_PROP_POWER_NOW: diff --git a/include/linux/prio_heap.h b/include/linux/prio_heap.h deleted file mode 100644 index 08094350f26a..000000000000 --- a/include/linux/prio_heap.h +++ /dev/null @@ -1,58 +0,0 @@ -#ifndef _LINUX_PRIO_HEAP_H -#define _LINUX_PRIO_HEAP_H - -/* - * Simple insertion-only static-sized priority heap containing - * pointers, based on CLR, chapter 7 - */ - -#include <linux/gfp.h> - -/** - * struct ptr_heap - 
simple static-sized priority heap - * @ptrs - pointer to data area - * @max - max number of elements that can be stored in @ptrs - * @size - current number of valid elements in @ptrs (in the range 0..@size-1 - * @gt: comparison operator, which should implement "greater than" - */ -struct ptr_heap { - void **ptrs; - int max; - int size; - int (*gt)(void *, void *); -}; - -/** - * heap_init - initialize an empty heap with a given memory size - * @heap: the heap structure to be initialized - * @size: amount of memory to use in bytes - * @gfp_mask: mask to pass to kmalloc() - * @gt: comparison operator, which should implement "greater than" - */ -extern int heap_init(struct ptr_heap *heap, size_t size, gfp_t gfp_mask, - int (*gt)(void *, void *)); - -/** - * heap_free - release a heap's storage - * @heap: the heap structure whose data should be released - */ -void heap_free(struct ptr_heap *heap); - -/** - * heap_insert - insert a value into the heap and return any overflowed value - * @heap: the heap to be operated on - * @p: the pointer to be inserted - * - * Attempts to insert the given value into the priority heap. If the - * heap is full prior to the insertion, then the resulting heap will - * consist of the smallest @max elements of the original heap and the - * new element; the greatest element will be removed from the heap and - * returned. Note that the returned element will be the new element - * (i.e. no change to the heap) if the new element is greater than all - * elements currently in the heap. - */ -extern void *heap_insert(struct ptr_heap *heap, void *p); - - - -#endif /* _LINUX_PRIO_HEAP_H */ diff --git a/include/linux/proportions.h b/include/linux/proportions.h index 26a8a4ed9b07..00e8e8fa7358 100644 --- a/include/linux/proportions.h +++ b/include/linux/proportions.h @@ -12,6 +12,7 @@ #include <linux/percpu_counter.h> #include <linux/spinlock.h> #include <linux/mutex.h> +#include <linux/gfp.h> struct prop_global { /* @@ -40,7 +41,7 @@ struct prop_descriptor { struct mutex mutex; /* serialize the prop_global switch */ }; -int prop_descriptor_init(struct prop_descriptor *pd, int shift); +int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp); void prop_change_shift(struct prop_descriptor *pd, int new_shift); /* @@ -61,7 +62,7 @@ struct prop_local_percpu { raw_spinlock_t lock; /* protect the snapshot state */ }; -int prop_local_init_percpu(struct prop_local_percpu *pl); +int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp); void prop_local_destroy_percpu(struct prop_local_percpu *pl); void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl); void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl, diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index fea49b5da12a..378c5ee75f78 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h @@ -43,6 +43,16 @@ struct rb_augment_callbacks { extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); +/* + * Fixup the rbtree and update the augmented information when rebalancing. + * + * On insertion, the user must update the augmented information on the path + * leading to the inserted node, then call rb_link_node() as usual and + * rb_augment_inserted() instead of the usual rb_insert_color() call. 
+ * If rb_augment_inserted() rebalances the rbtree, it will call back into + * a user provided function to update the augmented information on the + * affected subtrees. + */ static inline void rb_insert_augmented(struct rb_node *node, struct rb_root *root, const struct rb_augment_callbacks *augment) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index d231aa17b1d7..a4a819ffb2d1 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -47,14 +47,12 @@ #include <asm/barrier.h> extern int rcu_expedited; /* for sysctl */ -#ifdef CONFIG_RCU_TORTURE_TEST -extern int rcutorture_runnable; /* for sysctl */ -#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ enum rcutorture_type { RCU_FLAVOR, RCU_BH_FLAVOR, RCU_SCHED_FLAVOR, + RCU_TASKS_FLAVOR, SRCU_FLAVOR, INVALID_RCU_FLAVOR }; @@ -197,6 +195,28 @@ void call_rcu_sched(struct rcu_head *head, void synchronize_sched(void); +/** + * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period + * @head: structure to be used for queueing the RCU updates. + * @func: actual callback function to be invoked after the grace period + * + * The callback function will be invoked some time after a full grace + * period elapses, in other words after all currently executing RCU + * read-side critical sections have completed. call_rcu_tasks() assumes + * that the read-side critical sections end at a voluntary context + * switch (not a preemption!), entry into idle, or transition to usermode + * execution. As such, there are no read-side primitives analogous to + * rcu_read_lock() and rcu_read_unlock() because this primitive is intended + * to determine that all tasks have passed through a safe state, not so + * much for data-structure synchronization. + * + * See the description of call_rcu() for more detailed information on + * memory ordering guarantees. + */ +void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head)); +void synchronize_rcu_tasks(void); +void rcu_barrier_tasks(void); + #ifdef CONFIG_PREEMPT_RCU void __rcu_read_lock(void); @@ -238,8 +258,8 @@ static inline int rcu_preempt_depth(void) /* Internal to kernel */ void rcu_init(void); -void rcu_sched_qs(int cpu); -void rcu_bh_qs(int cpu); +void rcu_sched_qs(void); +void rcu_bh_qs(void); void rcu_check_callbacks(int cpu, int user); struct notifier_block; void rcu_idle_enter(void); @@ -269,6 +289,14 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev, struct task_struct *next) { } #endif /* CONFIG_RCU_USER_QS */ +#ifdef CONFIG_RCU_NOCB_CPU +void rcu_init_nohz(void); +#else /* #ifdef CONFIG_RCU_NOCB_CPU */ +static inline void rcu_init_nohz(void) +{ +} +#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ + /** * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers * @a: Code that RCU needs to pay attention to. @@ -294,6 +322,36 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev, rcu_irq_exit(); \ } while (0) +/* + * Note a voluntary context switch for RCU-tasks benefit. This is a + * macro rather than an inline function to avoid #include hell.
+ */ +#ifdef CONFIG_TASKS_RCU +#define TASKS_RCU(x) x +extern struct srcu_struct tasks_rcu_exit_srcu; +#define rcu_note_voluntary_context_switch(t) \ + do { \ + if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \ + ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \ + } while (0) +#else /* #ifdef CONFIG_TASKS_RCU */ +#define TASKS_RCU(x) do { } while (0) +#define rcu_note_voluntary_context_switch(t) do { } while (0) +#endif /* #else #ifdef CONFIG_TASKS_RCU */ + +/** + * cond_resched_rcu_qs - Report potential quiescent states to RCU + * + * This macro resembles cond_resched(), except that it is defined to + * report potential quiescent states to RCU-tasks even if the cond_resched() + * machinery were to be shut off, as some advocate for PREEMPT kernels. + */ +#define cond_resched_rcu_qs() \ +do { \ + rcu_note_voluntary_context_switch(current); \ + cond_resched(); \ +} while (0) + #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) bool __rcu_is_watching(void); #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ @@ -349,7 +407,7 @@ bool rcu_lockdep_current_cpu_online(void); #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ static inline bool rcu_lockdep_current_cpu_online(void) { - return 1; + return true; } #endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ @@ -371,41 +429,7 @@ extern struct lockdep_map rcu_sched_lock_map; extern struct lockdep_map rcu_callback_map; int debug_lockdep_rcu_enabled(void); -/** - * rcu_read_lock_held() - might we be in RCU read-side critical section? - * - * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU - * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, - * this assumes we are in an RCU read-side critical section unless it can - * prove otherwise. This is useful for debug checks in functions that - * require that they be called within an RCU read-side critical section. - * - * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot - * and while lockdep is disabled. - * - * Note that rcu_read_lock() and the matching rcu_read_unlock() must - * occur in the same context, for example, it is illegal to invoke - * rcu_read_unlock() in process context if the matching rcu_read_lock() - * was invoked from within an irq handler. - * - * Note that rcu_read_lock() is disallowed if the CPU is either idle or - * offline from an RCU perspective, so check for those as well. - */ -static inline int rcu_read_lock_held(void) -{ - if (!debug_lockdep_rcu_enabled()) - return 1; - if (!rcu_is_watching()) - return 0; - if (!rcu_lockdep_current_cpu_online()) - return 0; - return lock_is_held(&rcu_lock_map); -} - -/* - * rcu_read_lock_bh_held() is defined out of line to avoid #include-file - * hell. 
- */ +int rcu_read_lock_held(void); int rcu_read_lock_bh_held(void); /** diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index d40a6a451330..38cc5b1e252d 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -80,7 +80,7 @@ static inline void kfree_call_rcu(struct rcu_head *head, static inline void rcu_note_context_switch(int cpu) { - rcu_sched_qs(cpu); + rcu_sched_qs(); } /* diff --git a/include/linux/reboot.h b/include/linux/reboot.h index 48bf152761c7..67fc8fcdc4b0 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h @@ -38,6 +38,9 @@ extern int reboot_force; extern int register_reboot_notifier(struct notifier_block *); extern int unregister_reboot_notifier(struct notifier_block *); +extern int register_restart_handler(struct notifier_block *); +extern int unregister_restart_handler(struct notifier_block *); +extern void do_kernel_restart(char *cmd); /* * Architecture-specific implementations of sys_reboot commands. diff --git a/include/linux/rmap.h b/include/linux/rmap.h index be574506e6a9..c0c2bce6b0b7 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -150,7 +150,7 @@ int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *); static inline void anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next) { - VM_BUG_ON(vma->anon_vma != next->anon_vma); + VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma); unlink_anon_vmas(next); } diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 035d3c57fc8a..8f498cdde280 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -149,7 +149,7 @@ extern void downgrade_write(struct rw_semaphore *sem); * static then another method for expressing nested locking is * the explicit definition of lock class keys and the use of * lockdep_set_class() at lock initialization time. - * See Documentation/lockdep-design.txt for more details.) + * See Documentation/locking/lockdep-design.txt for more details.) */ extern void down_read_nested(struct rw_semaphore *sem, int subclass); extern void down_write_nested(struct rw_semaphore *sem, int subclass); diff --git a/include/linux/sched.h b/include/linux/sched.h index 9c6353d9e63a..5e344bbe63ec 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -57,6 +57,7 @@ struct sched_param { #include <linux/llist.h> #include <linux/uidgid.h> #include <linux/gfp.h> +#include <linux/magic.h> #include <asm/processor.h> @@ -646,6 +647,7 @@ struct signal_struct { * Live threads maintain their own counters and add to these * in __exit_signal, except for the group leader. 
*/ + seqlock_t stats_lock; cputime_t utime, stime, cutime, cstime; cputime_t gtime; cputime_t cgtime; @@ -1024,6 +1026,7 @@ struct sched_domain_topology_level { extern struct sched_domain_topology_level *sched_domain_topology; extern void set_sched_topology(struct sched_domain_topology_level *tl); +extern void wake_up_if_idle(int cpu); #ifdef CONFIG_SCHED_DEBUG # define SD_INIT_NAME(type) .name = #type @@ -1213,6 +1216,13 @@ struct sched_dl_entity { struct hrtimer dl_timer; }; +union rcu_special { + struct { + bool blocked; + bool need_qs; + } b; + short s; +}; struct rcu_node; enum perf_event_task_context { @@ -1265,12 +1275,18 @@ struct task_struct { #ifdef CONFIG_PREEMPT_RCU int rcu_read_lock_nesting; - char rcu_read_unlock_special; + union rcu_special rcu_read_unlock_special; struct list_head rcu_node_entry; #endif /* #ifdef CONFIG_PREEMPT_RCU */ #ifdef CONFIG_TREE_PREEMPT_RCU struct rcu_node *rcu_blocked_node; #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ +#ifdef CONFIG_TASKS_RCU + unsigned long rcu_tasks_nvcsw; + bool rcu_tasks_holdout; + struct list_head rcu_tasks_holdout_list; + int rcu_tasks_idle_cpu; +#endif /* #ifdef CONFIG_TASKS_RCU */ #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) struct sched_info sched_info; @@ -1935,11 +1951,13 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) #define used_math() tsk_used_math(current) -/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */ +/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags + * __GFP_FS is also cleared as it implies __GFP_IO. + */ static inline gfp_t memalloc_noio_flags(gfp_t flags) { if (unlikely(current->flags & PF_MEMALLOC_NOIO)) - flags &= ~__GFP_IO; + flags &= ~(__GFP_IO | __GFP_FS); return flags; } @@ -2012,29 +2030,21 @@ extern void task_clear_jobctl_trapping(struct task_struct *task); extern void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask); -#ifdef CONFIG_PREEMPT_RCU - -#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ -#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. 
*/ - static inline void rcu_copy_process(struct task_struct *p) { +#ifdef CONFIG_PREEMPT_RCU p->rcu_read_lock_nesting = 0; - p->rcu_read_unlock_special = 0; -#ifdef CONFIG_TREE_PREEMPT_RCU + p->rcu_read_unlock_special.s = 0; p->rcu_blocked_node = NULL; -#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ INIT_LIST_HEAD(&p->rcu_node_entry); +#endif /* #ifdef CONFIG_PREEMPT_RCU */ +#ifdef CONFIG_TASKS_RCU + p->rcu_tasks_holdout = false; + INIT_LIST_HEAD(&p->rcu_tasks_holdout_list); + p->rcu_tasks_idle_cpu = -1; +#endif /* #ifdef CONFIG_TASKS_RCU */ } -#else - -static inline void rcu_copy_process(struct task_struct *p) -{ -} - -#endif - static inline void tsk_restore_flags(struct task_struct *task, unsigned long orig_flags, unsigned long flags) { @@ -2640,6 +2650,8 @@ static inline unsigned long *end_of_stack(struct task_struct *p) } #endif +#define task_stack_end_corrupted(task) \ + (*(end_of_stack(task)) != STACK_END_MAGIC) static inline int object_is_on_stack(void *obj) { @@ -2662,6 +2674,7 @@ static inline unsigned long stack_not_used(struct task_struct *p) return (unsigned long)n - (unsigned long)end_of_stack(p); } #endif +extern void set_task_stack_end_magic(struct task_struct *tsk); /* set thread flags in other task's structures * - see asm/thread_info.h for TIF_xxxx flags available diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h index 005bf3e38db5..f0f8bad54be9 100644 --- a/include/linux/screen_info.h +++ b/include/linux/screen_info.h @@ -5,12 +5,4 @@ extern struct screen_info screen_info; -#define ORIG_X (screen_info.orig_x) -#define ORIG_Y (screen_info.orig_y) -#define ORIG_VIDEO_MODE (screen_info.orig_video_mode) -#define ORIG_VIDEO_COLS (screen_info.orig_video_cols) -#define ORIG_VIDEO_EGA_BX (screen_info.orig_video_ega_bx) -#define ORIG_VIDEO_LINES (screen_info.orig_video_lines) -#define ORIG_VIDEO_ISVGA (screen_info.orig_video_isVGA) -#define ORIG_VIDEO_POINTS (screen_info.orig_video_points) #endif /* _SCREEN_INFO_H */ diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 5d586a45a319..a19ddacdac30 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -27,19 +27,23 @@ struct seccomp { struct seccomp_filter *filter; }; -extern int __secure_computing(int); -static inline int secure_computing(int this_syscall) +#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER +extern int __secure_computing(void); +static inline int secure_computing(void) { if (unlikely(test_thread_flag(TIF_SECCOMP))) - return __secure_computing(this_syscall); + return __secure_computing(); return 0; } -/* A wrapper for architectures supporting only SECCOMP_MODE_STRICT. 
*/ -static inline void secure_computing_strict(int this_syscall) -{ - BUG_ON(secure_computing(this_syscall) != 0); -} +#define SECCOMP_PHASE1_OK 0 +#define SECCOMP_PHASE1_SKIP 1 + +extern u32 seccomp_phase1(struct seccomp_data *sd); +int seccomp_phase2(u32 phase1_result); +#else +extern void secure_computing_strict(int this_syscall); +#endif extern long prctl_get_seccomp(void); extern long prctl_set_seccomp(unsigned long, char __user *); @@ -56,8 +60,11 @@ static inline int seccomp_mode(struct seccomp *s) struct seccomp { }; struct seccomp_filter { }; -static inline int secure_computing(int this_syscall) { return 0; } +#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER +static inline int secure_computing(void) { return 0; } +#else static inline void secure_computing_strict(int this_syscall) { return; } +#endif static inline long prctl_get_seccomp(void) { diff --git a/include/linux/security.h b/include/linux/security.h index 623f90e5f38d..ba96471c11ba 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1559,7 +1559,7 @@ struct security_operations { int (*file_lock) (struct file *file, unsigned int cmd); int (*file_fcntl) (struct file *file, unsigned int cmd, unsigned long arg); - int (*file_set_fowner) (struct file *file); + void (*file_set_fowner) (struct file *file); int (*file_send_sigiotask) (struct task_struct *tsk, struct fown_struct *fown, int sig); int (*file_receive) (struct file *file); @@ -1834,7 +1834,7 @@ int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot); int security_file_lock(struct file *file, unsigned int cmd); int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg); -int security_file_set_fowner(struct file *file); +void security_file_set_fowner(struct file *file); int security_file_send_sigiotask(struct task_struct *tsk, struct fown_struct *fown, int sig); int security_file_receive(struct file *file); @@ -2108,7 +2108,7 @@ static inline int security_dentry_init_security(struct dentry *dentry, static inline int security_inode_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, - const initxattrs initxattrs, + const initxattrs xattrs, void *fs_data) { return 0; @@ -2312,9 +2312,9 @@ static inline int security_file_fcntl(struct file *file, unsigned int cmd, return 0; } -static inline int security_file_set_fowner(struct file *file) +static inline void security_file_set_fowner(struct file *file) { - return 0; + return; } static inline int security_file_send_sigiotask(struct task_struct *tsk, diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index cc359636cfa3..f5df8f687b4d 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -456,4 +456,23 @@ read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags) spin_unlock_irqrestore(&sl->lock, flags); } +static inline unsigned long +read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq) +{ + unsigned long flags = 0; + + if (!(*seq & 1)) /* Even */ + *seq = read_seqbegin(lock); + else /* Odd */ + read_seqlock_excl_irqsave(lock, flags); + + return flags; +} + +static inline void +done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags) +{ + if (seq & 1) + read_sequnlock_excl_irqrestore(lock, flags); +} #endif /* __LINUX_SEQLOCK_H */ diff --git a/include/linux/signal.h b/include/linux/signal.h index 750196fcc0a5..ab1e0392b5ac 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -2,6 +2,7 @@ #define _LINUX_SIGNAL_H #include <linux/list.h> +#include <linux/bug.h> 
#include <uapi/linux/signal.h> struct task_struct; @@ -67,7 +68,6 @@ static inline int sigismember(sigset_t *set, int _sig) static inline int sigisemptyset(sigset_t *set) { - extern void _NSIG_WORDS_is_unsupported_size(void); switch (_NSIG_WORDS) { case 4: return (set->sig[3] | set->sig[2] | @@ -77,7 +77,7 @@ static inline int sigisemptyset(sigset_t *set) case 1: return set->sig[0] == 0; default: - _NSIG_WORDS_is_unsupported_size(); + BUILD_BUG(); return 0; } } @@ -90,24 +90,23 @@ static inline int sigisemptyset(sigset_t *set) #define _SIG_SET_BINOP(name, op) \ static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \ { \ - extern void _NSIG_WORDS_is_unsupported_size(void); \ unsigned long a0, a1, a2, a3, b0, b1, b2, b3; \ \ switch (_NSIG_WORDS) { \ - case 4: \ + case 4: \ a3 = a->sig[3]; a2 = a->sig[2]; \ b3 = b->sig[3]; b2 = b->sig[2]; \ r->sig[3] = op(a3, b3); \ r->sig[2] = op(a2, b2); \ - case 2: \ + case 2: \ a1 = a->sig[1]; b1 = b->sig[1]; \ r->sig[1] = op(a1, b1); \ - case 1: \ + case 1: \ a0 = a->sig[0]; b0 = b->sig[0]; \ r->sig[0] = op(a0, b0); \ break; \ - default: \ - _NSIG_WORDS_is_unsupported_size(); \ + default: \ + BUILD_BUG(); \ } \ } @@ -128,16 +127,14 @@ _SIG_SET_BINOP(sigandnsets, _sig_andn) #define _SIG_SET_OP(name, op) \ static inline void name(sigset_t *set) \ { \ - extern void _NSIG_WORDS_is_unsupported_size(void); \ - \ switch (_NSIG_WORDS) { \ - case 4: set->sig[3] = op(set->sig[3]); \ - set->sig[2] = op(set->sig[2]); \ - case 2: set->sig[1] = op(set->sig[1]); \ - case 1: set->sig[0] = op(set->sig[0]); \ + case 4: set->sig[3] = op(set->sig[3]); \ + set->sig[2] = op(set->sig[2]); \ + case 2: set->sig[1] = op(set->sig[1]); \ + case 1: set->sig[0] = op(set->sig[0]); \ break; \ - default: \ - _NSIG_WORDS_is_unsupported_size(); \ + default: \ + BUILD_BUG(); \ } \ } diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 3ab0749d6875..a59d9343c25b 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1203,7 +1203,12 @@ static inline struct sk_buff *skb_unshare(struct sk_buff *skb, might_sleep_if(pri & __GFP_WAIT); if (skb_cloned(skb)) { struct sk_buff *nskb = skb_copy(skb, pri); - kfree_skb(skb); /* Free our shared copy */ + + /* Free our shared copy */ + if (likely(nskb)) + consume_skb(skb); + else + kfree_skb(skb); skb = nskb; } return skb; diff --git a/include/linux/slab.h b/include/linux/slab.h index 1d9abb7d22a0..c265bec6a57d 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -158,31 +158,6 @@ size_t ksize(const void *); #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) #endif -#ifdef CONFIG_SLOB -/* - * Common fields provided in kmem_cache by all slab allocators - * This struct is either used directly by the allocator (SLOB) - * or the allocator must include definitions for all fields - * provided in kmem_cache_common in their definition of kmem_cache. - * - * Once we can do anonymous structs (C11 standard) we could put a - * anonymous struct definition in these allocators so that the - * separate allocations in the kmem_cache structure of SLAB and - * SLUB is no longer needed. 
- */ -struct kmem_cache { - unsigned int object_size;/* The original size of the object */ - unsigned int size; /* The aligned/padded/added on size */ - unsigned int align; /* Alignment as calculated */ - unsigned long flags; /* Active flags on the slab */ - const char *name; /* Slab name for sysfs */ - int refcount; /* Use counter */ - void (*ctor)(void *); /* Called on object slot creation */ - struct list_head list; /* List of all slab caches on the system */ -}; - -#endif /* CONFIG_SLOB */ - /* * Kmalloc array related definitions */ @@ -363,14 +338,6 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s, } #endif /* CONFIG_TRACING */ -#ifdef CONFIG_SLAB -#include <linux/slab_def.h> -#endif - -#ifdef CONFIG_SLUB -#include <linux/slub_def.h> -#endif - extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order); #ifdef CONFIG_TRACING @@ -582,37 +549,15 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags) * allocator where we care about the real place the memory allocation * request comes from. */ -#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \ - (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \ - (defined(CONFIG_SLOB) && defined(CONFIG_TRACING)) extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); #define kmalloc_track_caller(size, flags) \ __kmalloc_track_caller(size, flags, _RET_IP_) -#else -#define kmalloc_track_caller(size, flags) \ - __kmalloc(size, flags) -#endif /* DEBUG_SLAB */ #ifdef CONFIG_NUMA -/* - * kmalloc_node_track_caller is a special version of kmalloc_node that - * records the calling function of the routine calling it for slab leak - * tracking instead of just the calling function (confusing, eh?). - * It's useful when the call to kmalloc_node comes from a widely-used - * standard allocator where we care about the real place the memory - * allocation request comes from. - */ -#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \ - (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \ - (defined(CONFIG_SLOB) && defined(CONFIG_TRACING)) extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); #define kmalloc_node_track_caller(size, flags, node) \ __kmalloc_node_track_caller(size, flags, node, \ _RET_IP_) -#else -#define kmalloc_node_track_caller(size, flags, node) \ - __kmalloc_node(size, flags, node) -#endif #else /* CONFIG_NUMA */ @@ -650,14 +595,7 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node) return kmalloc_node(size, flags | __GFP_ZERO, node); } -/* - * Determine the size of a slab object - */ -static inline unsigned int kmem_cache_size(struct kmem_cache *s) -{ - return s->object_size; -} - +unsigned int kmem_cache_size(struct kmem_cache *s); void __init kmem_cache_init_late(void); #endif /* _LINUX_SLAB_H */ diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 8235dfbb3b05..b869d1662ba3 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -8,6 +8,8 @@ */ struct kmem_cache { + struct array_cache __percpu *cpu_cache; + /* 1) Cache tunables. 
Protected by slab_mutex */ unsigned int batchcount; unsigned int limit; @@ -71,23 +73,7 @@ struct kmem_cache { struct memcg_cache_params *memcg_params; #endif -/* 6) per-cpu/per-node data, touched during every alloc/free */ - /* - * We put array[] at the end of kmem_cache, because we want to size - * this array to nr_cpu_ids slots instead of NR_CPUS - * (see kmem_cache_init()) - * We still use [NR_CPUS] and not [1] or [0] because cache_cache - * is statically defined, so we reserve the max number of cpus. - * - * We also need to guarantee that the list is able to accomodate a - * pointer for each node since "nodelists" uses the remainder of - * available pointers. - */ - struct kmem_cache_node **node; - struct array_cache *array[NR_CPUS + MAX_NUMNODES]; - /* - * Do not add fields after array[] - */ + struct kmem_cache_node *node[MAX_NUMNODES]; }; #endif /* _LINUX_SLAB_DEF_H */ diff --git a/include/linux/smp.h b/include/linux/smp.h index 34347f26be9b..93dff5fff524 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -100,6 +100,7 @@ int smp_call_function_any(const struct cpumask *mask, smp_call_func_t func, void *info, int wait); void kick_all_cpus_sync(void); +void wake_up_all_idle_cpus(void); /* * Generic and arch helpers @@ -148,6 +149,7 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func, } static inline void kick_all_cpus_sync(void) { } +static inline void wake_up_all_idle_cpus(void) { } #endif /* !SMP */ diff --git a/include/linux/spi/mcp23s08.h b/include/linux/spi/mcp23s08.h index 2d676d5aaa89..aa07d7b32568 100644 --- a/include/linux/spi/mcp23s08.h +++ b/include/linux/spi/mcp23s08.h @@ -22,4 +22,22 @@ struct mcp23s08_platform_data { * base to base+15 (or base+31 for s17 variant). */ unsigned base; + /* Marks the device as an interrupt controller. + * NOTE: The interrupt functionality is only supported for the I2C + * versions of the chips. The SPI chips can also generate interrupts, + * but this is not yet supported by the Linux driver. + */ + bool irq_controller; + + /* Sets the mirror flag in the IOCON register. Devices + * with two interrupt outputs (these are the devices ending with 17 and + * those that have 16 IOs) have two IO banks: IO 0-7 form bank 1 and + * IO 8-15 are bank 2. These chips have two different interrupt outputs: + * One for bank 1 and another for bank 2. If irq-mirror is set, both + * interrupts are generated regardless of the bank that an input change + * occurred on. If it is not set, interrupts are only generated for + * the bank they belong to. + * On devices with only one interrupt output this property has no effect.
+ */ + bool mirror; }; diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h index 82d5111cd0c2..d5a316550177 100644 --- a/include/linux/spi/pxa2xx_spi.h +++ b/include/linux/spi/pxa2xx_spi.h @@ -23,6 +23,8 @@ #define PXA2XX_CS_ASSERT (0x01) #define PXA2XX_CS_DEASSERT (0x02) +struct dma_chan; + /* device.platform_data for SSP controller devices */ struct pxa2xx_spi_master { u32 clock_enable; @@ -30,10 +32,9 @@ struct pxa2xx_spi_master { u8 enable_dma; /* DMA engine specific config */ - int rx_chan_id; - int tx_chan_id; - int rx_slave_id; - int tx_slave_id; + bool (*dma_filter)(struct dma_chan *chan, void *param); + void *tx_param; + void *rx_param; /* For non-PXA arches */ struct ssp_device ssp; diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 3f2867ff0ced..262ba4ef9a8e 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -197,7 +197,13 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ } while (0) #else -# define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock) +/* + * Always evaluate the 'subclass' argument to avoid that the compiler + * warns about set-but-not-used variables when building with + * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1. + */ +# define raw_spin_lock_nested(lock, subclass) \ + _raw_spin_lock(((void)(subclass), (lock))) # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) #endif diff --git a/include/linux/string.h b/include/linux/string.h index d36977e029af..e6edfe51575a 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -41,7 +41,7 @@ extern int strcmp(const char *,const char *); extern int strncmp(const char *,const char *,__kernel_size_t); #endif #ifndef __HAVE_ARCH_STRNICMP -extern int strnicmp(const char *, const char *, __kernel_size_t); +#define strnicmp strncasecmp #endif #ifndef __HAVE_ARCH_STRCASECMP extern int strcasecmp(const char *s1, const char *s2); diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h index 3eeee9672a4a..6eb567ac56bc 100644 --- a/include/linux/string_helpers.h +++ b/include/linux/string_helpers.h @@ -20,40 +20,6 @@ int string_get_size(u64 size, enum string_size_units units, #define UNESCAPE_ANY \ (UNESCAPE_SPACE | UNESCAPE_OCTAL | UNESCAPE_HEX | UNESCAPE_SPECIAL) -/** - * string_unescape - unquote characters in the given string - * @src: source buffer (escaped) - * @dst: destination buffer (unescaped) - * @size: size of the destination buffer (0 to unlimit) - * @flags: combination of the flags (bitwise OR): - * %UNESCAPE_SPACE: - * '\f' - form feed - * '\n' - new line - * '\r' - carriage return - * '\t' - horizontal tab - * '\v' - vertical tab - * %UNESCAPE_OCTAL: - * '\NNN' - byte with octal value NNN (1 to 3 digits) - * %UNESCAPE_HEX: - * '\xHH' - byte with hexadecimal value HH (1 to 2 digits) - * %UNESCAPE_SPECIAL: - * '\"' - double quote - * '\\' - backslash - * '\a' - alert (BEL) - * '\e' - escape - * %UNESCAPE_ANY: - * all previous together - * - * Returns amount of characters processed to the destination buffer excluding - * trailing '\0'. - * - * Because the size of the output will be the same as or less than the size of - * the input, the transformation may be performed in place. - * - * Caller must provide valid source and destination pointers. Be aware that - * destination buffer will always be NULL-terminated. Source string must be - * NULL-terminated as well. 
- */ int string_unescape(char *src, char *dst, size_t size, unsigned int flags); static inline int string_unescape_inplace(char *buf, unsigned int flags) @@ -71,4 +37,35 @@ static inline int string_unescape_any_inplace(char *buf) return string_unescape_any(buf, buf, 0); } +#define ESCAPE_SPACE 0x01 +#define ESCAPE_SPECIAL 0x02 +#define ESCAPE_NULL 0x04 +#define ESCAPE_OCTAL 0x08 +#define ESCAPE_ANY \ + (ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_SPECIAL | ESCAPE_NULL) +#define ESCAPE_NP 0x10 +#define ESCAPE_ANY_NP (ESCAPE_ANY | ESCAPE_NP) +#define ESCAPE_HEX 0x20 + +int string_escape_mem(const char *src, size_t isz, char **dst, size_t osz, + unsigned int flags, const char *esc); + +static inline int string_escape_mem_any_np(const char *src, size_t isz, + char **dst, size_t osz, const char *esc) +{ + return string_escape_mem(src, isz, dst, osz, ESCAPE_ANY_NP, esc); +} + +static inline int string_escape_str(const char *src, char **dst, size_t sz, + unsigned int flags, const char *esc) +{ + return string_escape_mem(src, strlen(src), dst, sz, flags, esc); +} + +static inline int string_escape_str_any_np(const char *src, char **dst, + size_t sz, const char *esc) +{ + return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, esc); +} + #endif diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 519064e0c943..3388c1b6f7d8 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -189,6 +189,8 @@ struct platform_suspend_ops { struct platform_freeze_ops { int (*begin)(void); + int (*prepare)(void); + void (*restore)(void); void (*end)(void); }; @@ -371,6 +373,8 @@ extern int unregister_pm_notifier(struct notifier_block *nb); extern bool events_check_enabled; extern bool pm_wakeup_pending(void); +extern void pm_system_wakeup(void); +extern void pm_wakeup_clear(void); extern bool pm_get_wakeup_count(unsigned int *count, bool block); extern bool pm_save_wakeup_count(unsigned int count); extern void pm_wakep_autosleep_enabled(bool set); @@ -418,6 +422,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb) #define pm_notifier(fn, pri) do { (void)(fn); } while (0) static inline bool pm_wakeup_pending(void) { return false; } +static inline void pm_system_wakeup(void) {} +static inline void pm_wakeup_clear(void) {} static inline void lock_system_sleep(void) {} static inline void unlock_system_sleep(void) {} diff --git a/include/linux/swap.h b/include/linux/swap.h index 1b72060f093a..37a585beef5c 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -327,8 +327,10 @@ extern void lru_cache_add_active_or_unevictable(struct page *page, extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask); extern int __isolate_lru_page(struct page *page, isolate_mode_t mode); -extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, - gfp_t gfp_mask, bool noswap); +extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, + unsigned long nr_pages, + gfp_t gfp_mask, + bool may_swap); extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, gfp_t gfp_mask, bool noswap, struct zone *zone, @@ -354,22 +356,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order) extern int page_evictable(struct page *page); extern void check_move_unevictable_pages(struct page **, int nr_pages); -extern unsigned long scan_unevictable_pages; -extern int scan_unevictable_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -#ifdef CONFIG_NUMA -extern int 
scan_unevictable_register_node(struct node *node); -extern void scan_unevictable_unregister_node(struct node *node); -#else -static inline int scan_unevictable_register_node(struct node *node) -{ - return 0; -} -static inline void scan_unevictable_unregister_node(struct node *node) -{ -} -#endif - extern int kswapd_run(int nid); extern void kswapd_stop(int nid); #ifdef CONFIG_MEMCG diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h new file mode 100644 index 000000000000..6a8b9942632d --- /dev/null +++ b/include/linux/t10-pi.h @@ -0,0 +1,22 @@ +#ifndef _LINUX_T10_PI_H +#define _LINUX_T10_PI_H + +#include <linux/types.h> +#include <linux/blkdev.h> + +/* + * T10 Protection Information tuple. + */ +struct t10_pi_tuple { + __be16 guard_tag; /* Checksum */ + __be16 app_tag; /* Opaque storage */ + __be32 ref_tag; /* Target LBA or indirect LBA */ +}; + + +extern struct blk_integrity t10_pi_type1_crc; +extern struct blk_integrity t10_pi_type1_ip; +extern struct blk_integrity t10_pi_type3_crc; +extern struct blk_integrity t10_pi_type3_ip; + +#endif diff --git a/include/linux/tick.h b/include/linux/tick.h index 9a82c7dc3fdd..eda850ca757a 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -108,7 +108,7 @@ extern struct tick_sched *tick_get_tick_sched(int cpu); extern void tick_irq_enter(void); extern int tick_oneshot_mode_active(void); # ifndef arch_needs_cpu -# define arch_needs_cpu(cpu) (0) +# define arch_needs_cpu() (0) # endif # else static inline void tick_clock_notify(void) { } @@ -181,14 +181,12 @@ static inline bool tick_nohz_full_cpu(int cpu) return cpumask_test_cpu(cpu, tick_nohz_full_mask); } -extern void tick_nohz_init(void); extern void __tick_nohz_full_check(void); extern void tick_nohz_full_kick(void); extern void tick_nohz_full_kick_cpu(int cpu); extern void tick_nohz_full_kick_all(void); extern void __tick_nohz_task_switch(struct task_struct *tsk); #else -static inline void tick_nohz_init(void) { } static inline bool tick_nohz_full_enabled(void) { return false; } static inline bool tick_nohz_full_cpu(int cpu) { return false; } static inline void __tick_nohz_full_check(void) { } diff --git a/include/linux/topology.h b/include/linux/topology.h index dda6ee521e74..909b6e43b694 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -119,11 +119,20 @@ static inline int numa_node_id(void) * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem(). */ DECLARE_PER_CPU(int, _numa_mem_); +extern int _node_numa_mem_[MAX_NUMNODES]; #ifndef set_numa_mem static inline void set_numa_mem(int node) { this_cpu_write(_numa_mem_, node); + _node_numa_mem_[numa_node_id()] = node; +} +#endif + +#ifndef node_to_mem_node +static inline int node_to_mem_node(int node) +{ + return _node_numa_mem_[node]; } #endif @@ -146,6 +155,7 @@ static inline int cpu_to_mem(int cpu) static inline void set_cpu_numa_mem(int cpu, int node) { per_cpu(_numa_mem_, cpu) = node; + _node_numa_mem_[cpu_to_node(cpu)] = node; } #endif @@ -159,6 +169,13 @@ static inline int numa_mem_id(void) } #endif +#ifndef node_to_mem_node +static inline int node_to_mem_node(int node) +{ + return node; +} +#endif + #ifndef cpu_to_mem static inline int cpu_to_mem(int cpu) { diff --git a/include/linux/torture.h b/include/linux/torture.h index 5ca58fcbaf1b..7759fc3c622d 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -51,7 +51,7 @@ /* Definitions for online/offline exerciser. 
*/ int torture_onoff_init(long ooholdoff, long oointerval); -char *torture_onoff_stats(char *page); +void torture_onoff_stats(void); bool torture_onoff_failures(void); /* Low-rider random number generator. */ @@ -77,7 +77,8 @@ int torture_stutter_init(int s); /* Initialization and cleanup. */ bool torture_init_begin(char *ttype, bool v, int *runnable); void torture_init_end(void); -bool torture_cleanup(void); +bool torture_cleanup_begin(void); +void torture_cleanup_end(void); bool torture_must_stop(void); bool torture_must_stop_irq(void); void torture_kthread_stopping(char *title); diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index b1293f15f592..e08e21e5f601 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -157,6 +157,12 @@ extern void syscall_unregfunc(void); * Make sure the alignment of the structure in the __tracepoints section will * not add unwanted padding between the beginning of the section and the * structure. Force alignment to the same alignment as the section start. + * + * When lockdep is enabled, we make sure to always do the RCU portions of + * the tracepoint code, regardless of whether tracing is on or we match the + * condition. This lets us find RCU issues triggered with tracepoints even + * when this tracepoint is off. This code has no purpose other than poking + * RCU a bit. */ #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ extern struct tracepoint __tracepoint_##name; \ @@ -167,6 +173,11 @@ extern void syscall_unregfunc(void); TP_PROTO(data_proto), \ TP_ARGS(data_args), \ TP_CONDITION(cond),,); \ + if (IS_ENABLED(CONFIG_LOCKDEP)) { \ + rcu_read_lock_sched_notrace(); \ + rcu_dereference_sched(__tracepoint_##name.funcs);\ + rcu_read_unlock_sched_notrace(); \ + } \ } \ __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \ PARAMS(cond), PARAMS(data_proto), PARAMS(data_args)) \ diff --git a/include/linux/uio.h b/include/linux/uio.h index 290fbf0b6b8a..9b1581414cd4 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -80,6 +80,9 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); +size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i); +size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); +size_t iov_iter_zero(size_t bytes, struct iov_iter *); unsigned long iov_iter_alignment(const struct iov_iter *i); void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov, unsigned long nr_segs, size_t count); diff --git a/include/linux/virtio.h b/include/linux/virtio.h index b46671e28de2..65261a7244fc 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -78,6 +78,10 @@ bool virtqueue_is_broken(struct virtqueue *vq); /** * virtio_device - representation of a device using virtio * @index: unique position on the virtio bus + * @failed: saved value for CONFIG_S_FAILED bit (for restore) + * @config_enabled: configuration change reporting enabled + * @config_change_pending: configuration change reported while disabled + * @config_lock: protects configuration change reporting * @dev: underlying device. * @id: the device type identification (used to match it with a driver). * @config: the configuration ops for this device. 
@@ -88,6 +92,10 @@ bool virtqueue_is_broken(struct virtqueue *vq); */ struct virtio_device { int index; + bool failed; + bool config_enabled; + bool config_change_pending; + spinlock_t config_lock; struct device dev; struct virtio_device_id id; const struct virtio_config_ops *config; @@ -108,6 +116,12 @@ void unregister_virtio_device(struct virtio_device *dev); void virtio_break_device(struct virtio_device *dev); +void virtio_config_changed(struct virtio_device *dev); +#ifdef CONFIG_PM_SLEEP +int virtio_device_freeze(struct virtio_device *dev); +int virtio_device_restore(struct virtio_device *dev); +#endif + /** * virtio_driver - operations for a virtio I/O driver * @driver: underlying device driver (populate name and owner). diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index e8f8f71e843c..7f4ef66873ef 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -109,6 +109,23 @@ struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, return vq; } +/** + * virtio_device_ready - enable vq use in probe function + * @vdev: the device + * + * Driver must call this to use vqs in the probe function. + * + * Note: vqs are enabled automatically after probe returns. + */ +static inline +void virtio_device_ready(struct virtio_device *dev) +{ + unsigned status = dev->config->get_status(dev); + + BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK); + dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK); +} + static inline const char *virtio_bus_name(struct virtio_device *vdev) { diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index ced92345c963..730334cdf037 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -72,6 +72,13 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, THP_ZERO_PAGE_ALLOC, THP_ZERO_PAGE_ALLOC_FAILED, #endif +#ifdef CONFIG_MEMORY_BALLOON + BALLOON_INFLATE, + BALLOON_DEFLATE, +#ifdef CONFIG_BALLOON_COMPACTION + BALLOON_MIGRATE, +#endif +#endif #ifdef CONFIG_DEBUG_TLBFLUSH #ifdef CONFIG_SMP NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */ diff --git a/include/linux/wait.h b/include/linux/wait.h index 80115bf88671..e4a8eb9312ea 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -281,9 +281,11 @@ do { \ * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * - * The function returns 0 if the @timeout elapsed, or the remaining - * jiffies (at least 1) if the @condition evaluated to %true before - * the @timeout elapsed. + * Returns: + * 0 if the @condition evaluated to %false after the @timeout elapsed, + * 1 if the @condition evaluated to %true after the @timeout elapsed, + * or the remaining jiffies (at least 1) if the @condition evaluated + * to %true before the @timeout elapsed. */ #define wait_event_timeout(wq, condition, timeout) \ ({ \ @@ -364,9 +366,11 @@ do { \ * change the result of the wait condition. * * Returns: - * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by - * a signal, or the remaining jiffies (at least 1) if the @condition - * evaluated to %true before the @timeout elapsed. + * 0 if the @condition evaluated to %false after the @timeout elapsed, + * 1 if the @condition evaluated to %true after the @timeout elapsed, + * the remaining jiffies (at least 1) if the @condition evaluated + * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was + * interrupted by a signal. 
*/ #define wait_event_interruptible_timeout(wq, condition, timeout) \ ({ \ diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index e44d634e7fb7..05c214760977 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -46,6 +46,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, enum zs_mapmode mm); void zs_unmap_object(struct zs_pool *pool, unsigned long handle); -u64 zs_get_total_size_bytes(struct zs_pool *pool); +unsigned long zs_get_total_pages(struct zs_pool *pool); #endif
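The OF PM-domain provider glue in the pm_domain.h hunk above (genpd_onecell_data, of_genpd_add_provider_simple(), genpd_dev_pm_attach()) lets a SoC driver publish a generic_pm_domain behind a device-tree phandle, after which bus code can attach consumer devices with dev_pm_domain_attach(). A minimal single-domain sketch, assuming CONFIG_PM_GENERIC_DOMAINS_OF=y; the soc_pd name and the power callbacks are illustrative only, and error unwinding is omitted:

#include <linux/of.h>
#include <linux/pm_domain.h>

static int soc_pd_power_on(struct generic_pm_domain *pd)
{
	/* Enable the SoC power island here. */
	return 0;
}

static int soc_pd_power_off(struct generic_pm_domain *pd)
{
	/* Disable the SoC power island here. */
	return 0;
}

static struct generic_pm_domain soc_pd = {
	.name		= "soc-pd",
	.power_on	= soc_pd_power_on,
	.power_off	= soc_pd_power_off,
};

static int soc_pd_register(struct device_node *np)
{
	/* false: the domain starts powered on; simple_qos_governor is the governor exported above */
	pm_genpd_init(&soc_pd, &simple_qos_governor, false);
	return of_genpd_add_provider_simple(np, &soc_pd);
}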
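call_rcu_tasks(), declared in the rcupdate.h hunk above, defers its callback until every task has passed through a voluntary context switch, idle, or user-mode execution, which is what makes it suitable for freeing trampoline-like code that a preempted task might still be running. A sketch assuming CONFIG_TASKS_RCU=y; struct trampoline and the function names are invented for illustration:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct trampoline {
	struct rcu_head rh;
	/* ... generated instructions, private data, ... */
};

static void trampoline_free_cb(struct rcu_head *head)
{
	kfree(container_of(head, struct trampoline, rh));
}

static void trampoline_release(struct trampoline *t)
{
	/* Freed only once no task can still be executing inside it. */
	call_rcu_tasks(&t->rh, trampoline_free_cb);
}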
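register_restart_handler(), added to reboot.h above, takes an ordinary notifier_block whose callback performs the machine restart when do_kernel_restart() walks the chain. A sketch with hypothetical myboard_* names; the priority value 128 is assumed here to be the conventional slot for a default handler:

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int myboard_restart(struct notifier_block *nb, unsigned long mode, void *cmd)
{
	/* Poke the board-specific reset logic here. */
	return NOTIFY_DONE;
}

static struct notifier_block myboard_restart_nb = {
	.notifier_call	= myboard_restart,
	.priority	= 128,		/* assumed "default handler" priority */
};

static int __init myboard_restart_init(void)
{
	return register_restart_handler(&myboard_restart_nb);
}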
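The read_seqbegin_or_lock_irqsave()/done_seqretry_irqrestore() helpers added to seqlock.h above pair with the pre-existing need_seqretry() so a reader can try a lockless pass first and only fall back to taking the lock with interrupts disabled when it races with a writer; this is the pattern the new signal_struct stats_lock is meant to be read with. A sketch over an invented stats_seqlock/stats_total pair rather than any real kernel user:

#include <linux/seqlock.h>
#include <linux/types.h>

static DEFINE_SEQLOCK(stats_seqlock);	/* illustrative lock */
static u64 stats_total;			/* illustrative data it protects */

static u64 read_stats_total(void)
{
	unsigned long flags;
	int seq, nextseq = 0;		/* even: start with a lockless pass */
	u64 total;

	do {
		seq = nextseq;
		flags = read_seqbegin_or_lock_irqsave(&stats_seqlock, &seq);
		total = stats_total;
		/* If the lockless pass raced with a writer, retry with the lock held. */
		nextseq = 1;
	} while (need_seqretry(&stats_seqlock, seq));
	done_seqretry_irqrestore(&stats_seqlock, seq, flags);

	return total;
}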
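virtio_device_ready(), added to virtio_config.h above, lets a driver that must use its virtqueues from inside probe() set DRIVER_OK itself instead of waiting for the core to do so after probe returns. A probe-path sketch; my_vq, handle_rx and my_virtio_probe are invented names:

#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

static struct virtqueue *my_vq;

static void handle_rx(struct virtqueue *vq)
{
	/* Process used buffers here. */
}

static int my_virtio_probe(struct virtio_device *vdev)
{
	my_vq = virtio_find_single_vq(vdev, handle_rx, "rx");
	if (IS_ERR(my_vq))
		return PTR_ERR(my_vq);

	/* The queue is kicked from probe itself, so mark the device ready now. */
	virtio_device_ready(vdev);
	virtqueue_kick(my_vq);

	return 0;
}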