Diffstat (limited to 'include')
-rw-r--r--  include/asm-ia64/dma-mapping.h    |   2
-rw-r--r--  include/asm-ia64/scatterlist.h    |   2
-rw-r--r--  include/asm-powerpc/dma-mapping.h | 158
-rw-r--r--  include/asm-powerpc/scatterlist.h |   2
-rw-r--r--  include/asm-sparc/scatterlist.h   |   2
-rw-r--r--  include/asm-sparc64/scatterlist.h |   2
-rw-r--r--  include/asm-x86/dma-mapping_32.h  |  13
-rw-r--r--  include/asm-x86/dma-mapping_64.h  |   3
-rw-r--r--  include/asm-x86/scatterlist_32.h  |   2
-rw-r--r--  include/asm-x86/scatterlist_64.h  |   2
-rw-r--r--  include/linux/i2o.h               |   3
-rw-r--r--  include/linux/ide.h               |   2
-rw-r--r--  include/linux/libata.h            |  16
-rw-r--r--  include/linux/scatterlist.h       |  84
-rw-r--r--  include/scsi/scsi.h               |   7
-rw-r--r--  include/scsi/scsi_cmnd.h          |   7
-rw-r--r--  include/scsi/scsi_host.h          |  13
17 files changed, 144 insertions, 176 deletions
diff --git a/include/asm-ia64/dma-mapping.h b/include/asm-ia64/dma-mapping.h
index 3ca6d5c14b2..f1735a22d0e 100644
--- a/include/asm-ia64/dma-mapping.h
+++ b/include/asm-ia64/dma-mapping.h
@@ -6,7 +6,7 @@
  * David Mosberger-Tang <davidm@hpl.hp.com>
  */
 #include <asm/machvec.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
 
 #define dma_alloc_coherent	platform_dma_alloc_coherent /* coherent mem. is cheap */
diff --git a/include/asm-ia64/scatterlist.h b/include/asm-ia64/scatterlist.h
index a452ea24205..7d5234d5031 100644
--- a/include/asm-ia64/scatterlist.h
+++ b/include/asm-ia64/scatterlist.h
@@ -30,4 +30,6 @@ struct scatterlist {
 #define sg_dma_len(sg)		((sg)->dma_length)
 #define sg_dma_address(sg)	((sg)->dma_address)
 
+#define ARCH_HAS_SG_CHAIN
+
 #endif /* _ASM_IA64_SCATTERLIST_H */
diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index d05891608f7..2af321f36ab 100644
--- a/include/asm-powerpc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -6,149 +6,6 @@
  */
 #ifndef _ASM_DMA_MAPPING_H
 #define _ASM_DMA_MAPPING_H
-#ifdef __KERNEL__
-
-#include <linux/types.h>
-#include <linux/cache.h>
-/* need struct page definitions */
-#include <linux/mm.h>
-#include <asm/scatterlist.h>
-#include <asm/io.h>
-
-#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
-
-#ifdef CONFIG_NOT_COHERENT_CACHE
-/*
- * DMA-consistent mapping functions for PowerPCs that don't support
- * cache snooping. These allocate/free a region of uncached mapped
- * memory space for use with DMA devices. Alternatively, you could
- * allocate the space "normally" and use the cache management functions
- * to ensure it is consistent.
- */
-extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
-extern void __dma_free_coherent(size_t size, void *vaddr);
-extern void __dma_sync(void *vaddr, size_t size, int direction);
-extern void __dma_sync_page(struct page *page, unsigned long offset,
-				 size_t size, int direction);
-
-#else /* ! CONFIG_NOT_COHERENT_CACHE */
-/*
- * Cache coherent cores.
- */
-
-#define __dma_alloc_coherent(gfp, size, handle)	NULL
-#define __dma_free_coherent(size, addr)		((void)0)
-#define __dma_sync(addr, size, rw)		((void)0)
-#define __dma_sync_page(pg, off, sz, rw)	((void)0)
-
-#endif /* ! CONFIG_NOT_COHERENT_CACHE */
-
-#ifdef CONFIG_PPC64
-/*
- * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
- */
-struct dma_mapping_ops {
-	void *		(*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag);
-	void		(*free_coherent)(struct device *dev, size_t size,
-				void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
-				size_t size, enum dma_data_direction direction);
-	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
-				size_t size, enum dma_data_direction direction);
-	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction);
-	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction);
-	int		(*dma_supported)(struct device *dev, u64 mask);
-	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
-};
-
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
-{
-	/* We don't handle the NULL dev case for ISA for now. We could
-	 * do it via an out of line call but it is not needed for now. The
-	 * only ISA DMA device we support is the floppy and we have a hack
-	 * in the floppy driver directly to get a device for us.
-	 */
-	if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
-		return NULL;
-	return dev->archdata.dma_ops;
-}
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	if (unlikely(dma_ops == NULL))
-		return 0;
-	if (dma_ops->dma_supported == NULL)
-		return 1;
-	return dma_ops->dma_supported(dev, mask);
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	if (unlikely(dma_ops == NULL))
-		return -EIO;
-	if (dma_ops->set_dma_mask != NULL)
-		return dma_ops->set_dma_mask(dev, dma_mask);
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
-	*dev->dma_mask = dma_mask;
-	return 0;
-}
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-}
-
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-					size_t size,
-					enum dma_data_direction direction)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-	return dma_ops->map_single(dev, cpu_addr, size, direction);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-				    size_t size,
-				    enum dma_data_direction direction)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-	dma_ops->unmap_single(dev, dma_addr, size, direction);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				      unsigned long offset, size_t size,
-				      enum dma_data_direction direction)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-	return dma_ops->map_single(dev, page_address(page) + offset, size,
-				   direction);
-}
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
 				  size_t size,
@@ -276,14 +133,15 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
 }
 
 static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 	   enum dma_data_direction direction)
 {
+	struct scatterlist *sg;
 	int i;
 
 	BUG_ON(direction == DMA_NONE);
 
-	for (i = 0; i < nents; i++, sg++) {
+	for_each_sg(sgl, sg, nents, i) {
 		BUG_ON(!sg->page);
 		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
 		sg->dma_address = page_to_bus(sg->page) + sg->offset;
@@ -318,26 +176,28 @@ static inline void dma_sync_single_for_device(struct device *dev,
 }
 
 static inline void dma_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sg, int nents,
+		struct scatterlist *sgl, int nents,
 		enum dma_data_direction direction)
 {
+	struct scatterlist *sg;
 	int i;
 
 	BUG_ON(direction == DMA_NONE);
 
-	for (i = 0; i < nents; i++, sg++)
+	for_each_sg(sgl, sg, nents, i)
 		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
 }
 
 static inline void dma_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sg, int nents,
+		struct scatterlist *sgl, int nents,
 		enum dma_data_direction direction)
 {
+	struct scatterlist *sg;
 	int i;
 
 	BUG_ON(direction == DMA_NONE);
 
-	for (i = 0; i < nents; i++, sg++)
+	for_each_sg(sgl, sg, nents, i)
 		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
 }
 
diff --git a/include/asm-powerpc/scatterlist.h b/include/asm-powerpc/scatterlist.h
index 8c992d1491d..b075f619c3b 100644
--- a/include/asm-powerpc/scatterlist.h
+++ b/include/asm-powerpc/scatterlist.h
@@ -41,5 +41,7 @@ struct scatterlist {
 #define ISA_DMA_THRESHOLD	(~0UL)
 #endif
 
+#define ARCH_HAS_SG_CHAIN
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_SCATTERLIST_H */
diff --git a/include/asm-sparc/scatterlist.h b/include/asm-sparc/scatterlist.h
index a4fcf9ac964..4055af90ad7 100644
--- a/include/asm-sparc/scatterlist.h
+++ b/include/asm-sparc/scatterlist.h
@@ -19,4 +19,6 @@ struct scatterlist {
 
 #define ISA_DMA_THRESHOLD (~0UL)
 
+#define ARCH_HAS_SG_CHAIN
+
 #endif /* !(_SPARC_SCATTERLIST_H) */
diff --git a/include/asm-sparc64/scatterlist.h b/include/asm-sparc64/scatterlist.h
index 048fdb40e81..703c5bbe6c8 100644
--- a/include/asm-sparc64/scatterlist.h
+++ b/include/asm-sparc64/scatterlist.h
@@ -20,4 +20,6 @@ struct scatterlist {
 
 #define ISA_DMA_THRESHOLD (~0UL)
 
+#define ARCH_HAS_SG_CHAIN
+
 #endif /* !(_SPARC64_SCATTERLIST_H) */
diff --git a/include/asm-x86/dma-mapping_32.h b/include/asm-x86/dma-mapping_32.h
index f1d72d177f6..6a2d26cb5da 100644
--- a/include/asm-x86/dma-mapping_32.h
+++ b/include/asm-x86/dma-mapping_32.h
@@ -2,10 +2,10 @@
 #define _ASM_I386_DMA_MAPPING_H
 
 #include <linux/mm.h>
+#include <linux/scatterlist.h>
 
 #include <asm/cache.h>
 #include <asm/io.h>
-#include <asm/scatterlist.h>
 #include <asm/bug.h>
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
@@ -35,18 +35,19 @@ dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 }
 
 static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	   enum dma_data_direction direction)
 {
+	struct scatterlist *sg;
 	int i;
 
 	BUG_ON(!valid_dma_direction(direction));
-	WARN_ON(nents == 0 || sg[0].length == 0);
+	WARN_ON(nents == 0 || sglist[0].length == 0);
 
-	for (i = 0; i < nents; i++ ) {
-		BUG_ON(!sg[i].page);
+	for_each_sg(sglist, sg, nents, i) {
+		BUG_ON(!sg->page);
 
-		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
+		sg->dma_address = page_to_phys(sg->page) + sg->offset;
 	}
 
 	flush_write_buffers();
diff --git a/include/asm-x86/dma-mapping_64.h b/include/asm-x86/dma-mapping_64.h
index 6897e2a436e..ecd0f6125ba 100644
--- a/include/asm-x86/dma-mapping_64.h
+++ b/include/asm-x86/dma-mapping_64.h
@@ -6,8 +6,7 @@
  * documentation.
  */
 
-
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
 #include <asm/io.h>
 #include <asm/swiotlb.h>
 
diff --git a/include/asm-x86/scatterlist_32.h b/include/asm-x86/scatterlist_32.h
index d7e45a8f1aa..bd5164aa8f6 100644
--- a/include/asm-x86/scatterlist_32.h
+++ b/include/asm-x86/scatterlist_32.h
@@ -10,6 +10,8 @@ struct scatterlist {
 	unsigned int	length;
 };
 
+#define ARCH_HAS_SG_CHAIN
+
 /* These macros should be used after a pci_map_sg call has been done
  * to get bus addresses of each of the SG entries and their lengths.
 * You should only work with the number of sg entries pci_map_sg
diff --git a/include/asm-x86/scatterlist_64.h b/include/asm-x86/scatterlist_64.h
index eaf7ada27e1..ef3986ba4b7 100644
--- a/include/asm-x86/scatterlist_64.h
+++ b/include/asm-x86/scatterlist_64.h
@@ -11,6 +11,8 @@ struct scatterlist {
 	unsigned int	dma_length;
 };
 
+#define ARCH_HAS_SG_CHAIN
+
 #define ISA_DMA_THRESHOLD (0x00ffffff)
 
 /* These macros should be used after a pci_map_sg call has been done
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index 9752307d16b..7da5b98d90e 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -32,6 +32,7 @@
 #include <linux/workqueue.h>	/* work_struct */
 #include <linux/mempool.h>
 #include <linux/mutex.h>
+#include <linux/scatterlist.h>
 
 #include <asm/io.h>
 #include <asm/semaphore.h>	/* Needed for MUTEX init macros */
@@ -837,7 +838,7 @@ static inline int i2o_dma_map_sg(struct i2o_controller *c,
 		if ((sizeof(dma_addr_t) > 4) && c->pae_support)
 			*mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg)));
 #endif
-		sg++;
+		sg = sg_next(sg);
 	}
 	*sg_ptr = mptr;
 
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 234fa3df24f..30a1931466a 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -772,7 +772,7 @@ typedef struct hwif_s {
 	unsigned int nsect;
 	unsigned int nleft;
-	unsigned int cursg;
+	struct scatterlist *cursg;
 	unsigned int cursg_ofs;
 
 	int rqsize;		/* max sectors per request */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 229a9ff9f92..377e6d4d9be 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -29,7 +29,7 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
 #include <linux/io.h>
 #include <linux/ata.h>
 #include <linux/workqueue.h>
@@ -416,6 +416,7 @@ struct ata_queued_cmd {
 	unsigned long		flags;		/* ATA_QCFLAG_xxx */
 	unsigned int		tag;
 	unsigned int		n_elem;
+	unsigned int		n_iter;
 	unsigned int		orig_n_elem;
 
 	int			dma_dir;
@@ -426,7 +427,7 @@ struct ata_queued_cmd {
 	unsigned int		nbytes;
 	unsigned int		curbytes;
 
-	unsigned int		cursg;
+	struct scatterlist	*cursg;
 	unsigned int		cursg_ofs;
 
 	struct scatterlist	sgent;
@@ -1043,7 +1044,7 @@ ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
 		return 1;
 	if (qc->pad_len)
 		return 0;
-	if (((sg - qc->__sg) + 1) == qc->n_elem)
+	if (qc->n_iter == qc->n_elem)
 		return 1;
 	return 0;
 }
@@ -1051,6 +1052,7 @@ ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
 static inline struct scatterlist *
 ata_qc_first_sg(struct ata_queued_cmd *qc)
 {
+	qc->n_iter = 0;
 	if (qc->n_elem)
 		return qc->__sg;
 	if (qc->pad_len)
@@ -1063,8 +1065,8 @@ ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
 {
 	if (sg == &qc->pad_sgent)
 		return NULL;
-	if (++sg - qc->__sg < qc->n_elem)
-		return sg;
+	if (++qc->n_iter < qc->n_elem)
+		return sg_next(sg);
 	if (qc->pad_len)
 		return &qc->pad_sgent;
 	return NULL;
@@ -1309,9 +1311,11 @@ static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
 	qc->dma_dir = DMA_NONE;
 	qc->__sg = NULL;
 	qc->flags = 0;
-	qc->cursg = qc->cursg_ofs = 0;
+	qc->cursg = NULL;
+	qc->cursg_ofs = 0;
 	qc->nbytes = qc->curbytes = 0;
 	qc->n_elem = 0;
+	qc->n_iter = 0;
 	qc->err_mask = 0;
 	qc->pad_len = 0;
 	qc->sect_size = ATA_SECT_SIZE;
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 4efbd9c445f..2dc7464cce5 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -20,4 +20,88 @@ static inline void sg_init_one(struct scatterlist *sg, const void *buf,
 	sg_set_buf(sg, buf, buflen);
 }
 
+/*
+ * We overload the LSB of the page pointer to indicate whether it's
+ * a valid sg entry, or whether it points to the start of a new scatterlist.
+ * Those low bits are there for everyone! (thanks mason :-)
+ */
+#define sg_is_chain(sg)		((unsigned long) (sg)->page & 0x01)
+#define sg_chain_ptr(sg)	\
+	((struct scatterlist *) ((unsigned long) (sg)->page & ~0x01))
+
+/**
+ * sg_next - return the next scatterlist entry in a list
+ * @sg:		The current sg entry
+ *
+ * Usually the next entry will be @sg@ + 1, but if this sg element is part
+ * of a chained scatterlist, it could jump to the start of a new
+ * scatterlist array.
+ *
+ * Note that the caller must ensure that there are further entries after
+ * the current entry, this function will NOT return NULL for an end-of-list.
+ *
+ */
+static inline struct scatterlist *sg_next(struct scatterlist *sg)
+{
+	sg++;
+
+	if (unlikely(sg_is_chain(sg)))
+		sg = sg_chain_ptr(sg);
+
+	return sg;
+}
+
+/*
+ * Loop over each sg element, following the pointer to a new list if necessary
+ */
+#define for_each_sg(sglist, sg, nr, __i)	\
+	for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
+
+/**
+ * sg_last - return the last scatterlist entry in a list
+ * @sgl:	First entry in the scatterlist
+ * @nents:	Number of entries in the scatterlist
+ *
+ * Should only be used casually, it (currently) scan the entire list
+ * to get the last entry.
+ *
+ * Note that the @sgl@ pointer passed in need not be the first one,
+ * the important bit is that @nents@ denotes the number of entries that
+ * exist from @sgl@.
+ *
+ */
+static inline struct scatterlist *sg_last(struct scatterlist *sgl,
+					  unsigned int nents)
+{
+#ifndef ARCH_HAS_SG_CHAIN
+	struct scatterlist *ret = &sgl[nents - 1];
+#else
+	struct scatterlist *sg, *ret = NULL;
+	int i;
+
+	for_each_sg(sgl, sg, nents, i)
+		ret = sg;
+
+#endif
+	return ret;
+}
+
+/**
+ * sg_chain - Chain two sglists together
+ * @prv:	First scatterlist
+ * @prv_nents:	Number of entries in prv
+ * @sgl:	Second scatterlist
+ *
+ * Links @prv@ and @sgl@ together, to form a longer scatterlist.
+ *
+ */
+static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
+			    struct scatterlist *sgl)
+{
+#ifndef ARCH_HAS_SG_CHAIN
+	BUG();
+#endif
+	prv[prv_nents - 1].page = (struct page *) ((unsigned long) sgl | 0x01);
+}
+
 #endif /* _LINUX_SCATTERLIST_H */
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 9f8f80ab0c8..702fcfeb37f 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -11,13 +11,6 @@
 #include <linux/types.h>
 
 /*
- * The maximum sg list length SCSI can cope with
- * (currently must be a power of 2 between 32 and 256)
- */
-#define SCSI_MAX_PHYS_SEGMENTS	MAX_PHYS_SEGMENTS
-
-
-/*
  * SCSI command lengths
  */
 
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 65ab5145a09..3f47e522a1e 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -5,6 +5,7 @@
 #include <linux/list.h>
 #include <linux/types.h>
 #include <linux/timer.h>
+#include <linux/scatterlist.h>
 
 struct request;
 struct scatterlist;
@@ -68,7 +69,7 @@ struct scsi_cmnd {
 
 	/* These elements define the operation we ultimately want to perform */
 	unsigned short use_sg;	/* Number of pieces of scatter-gather */
-	unsigned short sglist_len;	/* size of malloc'd scatter-gather list */
+	unsigned short __use_sg;
 
 	unsigned underflow;	/* Return error if less than
 				   this amount is transferred */
@@ -128,7 +129,7 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
 extern void scsi_kunmap_atomic_sg(void *virt);
 
 extern struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *, gfp_t);
-extern void scsi_free_sgtable(struct scatterlist *, int);
+extern void scsi_free_sgtable(struct scsi_cmnd *);
 
 extern int scsi_dma_map(struct scsi_cmnd *cmd);
 extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
@@ -148,6 +149,6 @@ static inline int scsi_get_resid(struct scsi_cmnd *cmd)
 }
 
 #define scsi_for_each_sg(cmd, sg, nseg, __i) \
-	for (__i = 0, sg = scsi_sglist(cmd); __i < (nseg); __i++, (sg)++)
+	for_each_sg(scsi_sglist(cmd), sg, nseg, __i)
 
 #endif /* _SCSI_SCSI_CMND_H */
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 7d210cd6c38..0fd4746ee39 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -39,6 +39,9 @@ struct blk_queue_tags;
 #define DISABLE_CLUSTERING 0
 #define ENABLE_CLUSTERING 1
 
+#define DISABLE_SG_CHAINING 0
+#define ENABLE_SG_CHAINING 1
+
 enum scsi_eh_timer_return {
 	EH_NOT_HANDLED,
 	EH_HANDLED,
@@ -443,6 +446,15 @@ struct scsi_host_template {
 	unsigned ordered_tag:1;
 
 	/*
+	 * true if the low-level driver can support sg chaining. this
+	 * will be removed eventually when all the drivers are
+	 * converted to support sg chaining.
+	 *
+	 * Status: OBSOLETE
+	 */
+	unsigned use_sg_chaining:1;
+
+	/*
 	 * Countdown for host blocking with no commands outstanding
 	 */
 	unsigned int max_host_blocked;
@@ -586,6 +598,7 @@ struct Scsi_Host {
 	unsigned unchecked_isa_dma:1;
 	unsigned use_clustering:1;
 	unsigned use_blk_tcq:1;
+	unsigned use_sg_chaining:1;
 
 	/*
 	 * Host has requested that no further requests come through for the
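For readers of this commit, here is a minimal sketch of how the chaining helpers added to include/linux/scatterlist.h above are meant to be used together: sg_chain() turns the last slot of the first array into a link entry, and for_each_sg()/sg_next() walk across that link transparently. This is not part of the patch; the function name and the two table sizes below are made up for illustration, and it assumes the architecture defines ARCH_HAS_SG_CHAIN.

#include <linux/scatterlist.h>

/* Hypothetical sizes, chosen only for the example. */
#define EXAMPLE_FIRST_NENTS	128
#define EXAMPLE_SECOND_NENTS	64

static void example_chain_and_walk(struct scatterlist *first,
				   struct scatterlist *second)
{
	struct scatterlist *sg;
	int i;

	/*
	 * Link the two arrays. The last slot of 'first' is consumed as the
	 * chain entry (its page pointer gets the low bit set), so only
	 * EXAMPLE_FIRST_NENTS - 1 entries of 'first' carry data.
	 */
	sg_chain(first, EXAMPLE_FIRST_NENTS, second);

	/*
	 * Walk every data entry across the chain boundary. sg_next()
	 * follows the chain pointer, which plain 'sg + 1' arithmetic
	 * would not; that is exactly the driver pattern this series
	 * converts with for_each_sg().
	 */
	for_each_sg(first, sg, EXAMPLE_FIRST_NENTS - 1 + EXAMPLE_SECOND_NENTS, i) {
		/* use sg->page, sg->offset and sg->length here */
	}
}

The scsi_for_each_sg() and dma_map_sg() conversions above follow the same pattern; a SCSI host that sets use_sg_chaining in its scsi_host_template declares that all of its sg walking already goes through these helpers, and per the OBSOLETE note the flag is only a stopgap until every driver is converted.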